git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/commitdiff
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless
author John W. Linville <linville@tuxdriver.com>
Wed, 26 Jun 2013 16:01:42 +0000 (12:01 -0400)
committer John W. Linville <linville@tuxdriver.com>
Wed, 26 Jun 2013 16:01:42 +0000 (12:01 -0400)
826 files changed:
Documentation/DocBook/80211.tmpl
Documentation/devicetree/bindings/mips/ralink.txt [new file with mode: 0644]
Documentation/devicetree/bindings/vendor-prefixes.txt
MAINTAINERS
Makefile
arch/arc/Kconfig
arch/arc/include/asm/Kbuild
arch/arc/include/asm/cache.h
arch/arc/include/asm/cacheflush.h
arch/arc/include/asm/page.h
arch/arc/include/asm/pgtable.h
arch/arc/include/asm/shmparam.h [new file with mode: 0644]
arch/arc/include/asm/tlb.h
arch/arc/mm/Makefile
arch/arc/mm/cache_arc700.c
arch/arc/mm/mmap.c [new file with mode: 0644]
arch/arc/mm/tlb.c
arch/arc/plat-tb10x/Kconfig
arch/arm64/include/asm/unistd32.h
arch/arm64/kernel/sys32.S
arch/blackfin/Makefile
arch/blackfin/boot/Makefile
arch/blackfin/include/asm/atomic.h
arch/blackfin/include/asm/bfin_sdh.h
arch/blackfin/include/asm/bitops.h
arch/blackfin/include/asm/def_LPBlackfin.h
arch/blackfin/include/asm/mem_init.h
arch/blackfin/kernel/cplb-nompu/cplbinit.c
arch/blackfin/kernel/cplb-nompu/cplbmgr.c
arch/blackfin/kernel/cplbinfo.c
arch/blackfin/kernel/setup.c
arch/blackfin/mach-bf537/boards/stamp.c
arch/blackfin/mach-bf538/boards/ezkit.c
arch/blackfin/mach-bf609/include/mach/cdefBF60x_base.h
arch/m68k/Kconfig.cpu
arch/m68k/Kconfig.machine
arch/m68k/Makefile
arch/m68k/include/asm/commproc.h
arch/m68k/include/asm/dbg.h [deleted file]
arch/m68k/include/asm/dma.h
arch/m68k/include/asm/m532xsim.h [deleted file]
arch/m68k/include/asm/m53xxacr.h
arch/m68k/include/asm/m53xxsim.h [new file with mode: 0644]
arch/m68k/include/asm/m54xxacr.h
arch/m68k/include/asm/mcfgpio.h
arch/m68k/include/asm/mcfsim.h
arch/m68k/include/asm/mcftimer.h
arch/m68k/platform/coldfire/Makefile
arch/m68k/platform/coldfire/m532x.c [deleted file]
arch/m68k/platform/coldfire/m53xx.c [new file with mode: 0644]
arch/m68k/platform/coldfire/timers.c
arch/microblaze/configs/mmu_defconfig
arch/microblaze/include/asm/pci.h
arch/microblaze/include/asm/uaccess.h
arch/microblaze/kernel/cpu/cpuinfo.c
arch/microblaze/kernel/head.S
arch/microblaze/kernel/intc.c
arch/microblaze/kernel/process.c
arch/microblaze/mm/init.c
arch/microblaze/pci/pci-common.c
arch/mips/Kbuild
arch/mips/Kconfig
arch/mips/Makefile
arch/mips/alchemy/Kconfig
arch/mips/alchemy/Platform
arch/mips/ar7/memory.c
arch/mips/ath79/setup.c
arch/mips/bcm63xx/Kconfig
arch/mips/bcm63xx/boards/board_bcm963xx.c
arch/mips/bcm63xx/clk.c
arch/mips/bcm63xx/cpu.c
arch/mips/bcm63xx/dev-flash.c
arch/mips/bcm63xx/dev-spi.c
arch/mips/bcm63xx/irq.c
arch/mips/bcm63xx/prom.c
arch/mips/bcm63xx/reset.c
arch/mips/bcm63xx/setup.c
arch/mips/cavium-octeon/octeon-irq.c
arch/mips/configs/malta_defconfig
arch/mips/configs/malta_kvm_defconfig [new file with mode: 0644]
arch/mips/configs/malta_kvm_guest_defconfig [new file with mode: 0644]
arch/mips/configs/maltaaprp_defconfig [new file with mode: 0644]
arch/mips/configs/maltasmtc_defconfig [new file with mode: 0644]
arch/mips/configs/maltasmvp_defconfig [new file with mode: 0644]
arch/mips/configs/maltaup_defconfig [new file with mode: 0644]
arch/mips/configs/sead3_defconfig
arch/mips/configs/sead3micro_defconfig [new file with mode: 0644]
arch/mips/fw/lib/Makefile
arch/mips/fw/lib/cmdline.c [new file with mode: 0644]
arch/mips/include/asm/asm.h
arch/mips/include/asm/bootinfo.h
arch/mips/include/asm/branch.h
arch/mips/include/asm/cpu-features.h
arch/mips/include/asm/dma-coherence.h [new file with mode: 0644]
arch/mips/include/asm/dma-mapping.h
arch/mips/include/asm/fpu_emulator.h
arch/mips/include/asm/fw/fw.h [new file with mode: 0644]
arch/mips/include/asm/gic.h
arch/mips/include/asm/hazards.h
arch/mips/include/asm/inst.h
arch/mips/include/asm/irqflags.h
arch/mips/include/asm/kvm.h [new file with mode: 0644]
arch/mips/include/asm/kvm_host.h [new file with mode: 0644]
arch/mips/include/asm/mach-bcm63xx/bcm63xx_clk.h [deleted file]
arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
arch/mips/include/asm/mach-bcm63xx/ioremap.h
arch/mips/include/asm/mach-generic/dma-coherence.h
arch/mips/include/asm/mach-generic/spaces.h
arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
arch/mips/include/asm/mach-ralink/mt7620.h [new file with mode: 0644]
arch/mips/include/asm/mach-ralink/rt288x.h [new file with mode: 0644]
arch/mips/include/asm/mach-ralink/rt288x/cpu-feature-overrides.h [new file with mode: 0644]
arch/mips/include/asm/mach-ralink/rt305x.h
arch/mips/include/asm/mach-ralink/rt305x/cpu-feature-overrides.h [new file with mode: 0644]
arch/mips/include/asm/mach-ralink/rt3883.h [new file with mode: 0644]
arch/mips/include/asm/mach-ralink/rt3883/cpu-feature-overrides.h [new file with mode: 0644]
arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
arch/mips/include/asm/mips-boards/generic.h
arch/mips/include/asm/mips-boards/prom.h [deleted file]
arch/mips/include/asm/mips_machine.h
arch/mips/include/asm/mipsregs.h
arch/mips/include/asm/mmu_context.h
arch/mips/include/asm/netlogic/haldefs.h
arch/mips/include/asm/netlogic/mips-extns.h
arch/mips/include/asm/netlogic/xlp-hal/pic.h
arch/mips/include/asm/netlogic/xlp-hal/usb.h [deleted file]
arch/mips/include/asm/pgtable.h
arch/mips/include/asm/processor.h
arch/mips/include/asm/prom.h
arch/mips/include/asm/sn/sn_private.h
arch/mips/include/asm/sn/types.h
arch/mips/include/asm/spinlock.h
arch/mips/include/asm/stackframe.h
arch/mips/include/asm/thread_info.h
arch/mips/include/asm/time.h
arch/mips/include/asm/uaccess.h
arch/mips/include/asm/uasm.h
arch/mips/include/uapi/asm/inst.h
arch/mips/kernel/Makefile
arch/mips/kernel/asm-offsets.c
arch/mips/kernel/binfmt_elfo32.c
arch/mips/kernel/branch.c
arch/mips/kernel/cevt-gic.c [new file with mode: 0644]
arch/mips/kernel/cevt-r4k.c
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/csrc-gic.c
arch/mips/kernel/genex.S
arch/mips/kernel/irq-gic.c
arch/mips/kernel/linux32.c
arch/mips/kernel/mips_machine.c
arch/mips/kernel/proc.c
arch/mips/kernel/process.c
arch/mips/kernel/prom.c
arch/mips/kernel/scall32-o32.S
arch/mips/kernel/scall64-o32.S
arch/mips/kernel/setup.c
arch/mips/kernel/signal.c
arch/mips/kernel/smp-mt.c
arch/mips/kernel/smp.c
arch/mips/kernel/smtc-asm.S
arch/mips/kernel/smtc.c
arch/mips/kernel/traps.c
arch/mips/kernel/unaligned.c
arch/mips/kvm/00README.txt [new file with mode: 0644]
arch/mips/kvm/Kconfig [new file with mode: 0644]
arch/mips/kvm/Makefile [new file with mode: 0644]
arch/mips/kvm/kvm_cb.c [new file with mode: 0644]
arch/mips/kvm/kvm_locore.S [new file with mode: 0644]
arch/mips/kvm/kvm_mips.c [new file with mode: 0644]
arch/mips/kvm/kvm_mips_comm.h [new file with mode: 0644]
arch/mips/kvm/kvm_mips_commpage.c [new file with mode: 0644]
arch/mips/kvm/kvm_mips_dyntrans.c [new file with mode: 0644]
arch/mips/kvm/kvm_mips_emul.c [new file with mode: 0644]
arch/mips/kvm/kvm_mips_int.c [new file with mode: 0644]
arch/mips/kvm/kvm_mips_int.h [new file with mode: 0644]
arch/mips/kvm/kvm_mips_opcode.h [new file with mode: 0644]
arch/mips/kvm/kvm_mips_stats.c [new file with mode: 0644]
arch/mips/kvm/kvm_tlb.c [new file with mode: 0644]
arch/mips/kvm/kvm_trap_emul.c [new file with mode: 0644]
arch/mips/kvm/trace.h [new file with mode: 0644]
arch/mips/lib/bitops.c
arch/mips/lib/dump_tlb.c
arch/mips/lib/memset.S
arch/mips/lib/mips-atomic.c
arch/mips/lib/r3k_dump_tlb.c
arch/mips/lib/strlen_user.S
arch/mips/lib/strncpy_user.S
arch/mips/lib/strnlen_user.S
arch/mips/math-emu/cp1emu.c
arch/mips/math-emu/dsemul.c
arch/mips/mm/Makefile
arch/mips/mm/c-r4k.c
arch/mips/mm/cache.c
arch/mips/mm/dma-default.c
arch/mips/mm/page.c
arch/mips/mm/tlb-r3k.c
arch/mips/mm/tlb-r4k.c
arch/mips/mm/tlb-r8k.c
arch/mips/mm/tlbex.c
arch/mips/mm/uasm-micromips.c [new file with mode: 0644]
arch/mips/mm/uasm-mips.c [new file with mode: 0644]
arch/mips/mm/uasm.c
arch/mips/mti-malta/Makefile
arch/mips/mti-malta/Platform
arch/mips/mti-malta/malta-cmdline.c [deleted file]
arch/mips/mti-malta/malta-display.c
arch/mips/mti-malta/malta-init.c
arch/mips/mti-malta/malta-int.c
arch/mips/mti-malta/malta-memory.c
arch/mips/mti-malta/malta-setup.c
arch/mips/mti-malta/malta-time.c
arch/mips/mti-sead3/Makefile
arch/mips/mti-sead3/leds-sead3.c
arch/mips/mti-sead3/sead3-cmdline.c [deleted file]
arch/mips/mti-sead3/sead3-console.c
arch/mips/mti-sead3/sead3-display.c
arch/mips/mti-sead3/sead3-init.c
arch/mips/mti-sead3/sead3-int.c
arch/mips/mti-sead3/sead3-setup.c
arch/mips/mti-sead3/sead3-time.c
arch/mips/netlogic/Kconfig
arch/mips/netlogic/common/smp.c
arch/mips/netlogic/dts/Makefile
arch/mips/netlogic/dts/xlp_evp.dts
arch/mips/netlogic/dts/xlp_svp.dts [new file with mode: 0644]
arch/mips/netlogic/xlp/nlm_hal.c
arch/mips/netlogic/xlp/setup.c
arch/mips/netlogic/xlp/usb-init.c
arch/mips/oprofile/op_model_mipsxx.c
arch/mips/pci/pci-ar71xx.c
arch/mips/pci/pci-ar724x.c
arch/mips/pci/pci-bcm63xx.c
arch/mips/powertv/init.c
arch/mips/powertv/init.h
arch/mips/powertv/memory.c
arch/mips/powertv/powertv_setup.c
arch/mips/ralink/Kconfig
arch/mips/ralink/Makefile
arch/mips/ralink/Platform
arch/mips/ralink/common.h
arch/mips/ralink/dts/Makefile
arch/mips/ralink/dts/mt7620a.dtsi [new file with mode: 0644]
arch/mips/ralink/dts/mt7620a_eval.dts [new file with mode: 0644]
arch/mips/ralink/dts/rt2880.dtsi [new file with mode: 0644]
arch/mips/ralink/dts/rt2880_eval.dts [new file with mode: 0644]
arch/mips/ralink/dts/rt3050.dtsi
arch/mips/ralink/dts/rt3052_eval.dts
arch/mips/ralink/dts/rt3883.dtsi [new file with mode: 0644]
arch/mips/ralink/dts/rt3883_eval.dts [new file with mode: 0644]
arch/mips/ralink/early_printk.c
arch/mips/ralink/irq.c
arch/mips/ralink/mt7620.c [new file with mode: 0644]
arch/mips/ralink/of.c
arch/mips/ralink/rt288x.c [new file with mode: 0644]
arch/mips/ralink/rt305x.c
arch/mips/ralink/rt3883.c [new file with mode: 0644]
arch/mips/sgi-ip27/ip27-klnuma.c
arch/mips/sgi-ip27/ip27-memory.c
arch/mips/sgi-ip27/ip27-timer.c
arch/parisc/kernel/sys_parisc32.c
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/sys_ppc32.c
arch/s390/kernel/compat_wrapper.S
arch/s390/kernel/syscalls.S
arch/sparc/kernel/sys32.S
arch/sparc/kernel/systbls_64.S
arch/unicore32/kernel/sys.c
arch/x86/ia32/sys_ia32.c
arch/x86/include/asm/sys_ia32.h
arch/x86/include/asm/syscalls.h
arch/x86/include/uapi/asm/msr-index.h
arch/x86/kernel/vm86_32.c
arch/x86/kvm/emulate.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/pci/xen.c
arch/x86/syscalls/syscall_32.tbl
arch/x86/xen/enlighten.c
arch/x86/xen/spinlock.c
drivers/bcma/Kconfig
drivers/bcma/core.c
drivers/bcma/driver_chipcommon_sflash.c
drivers/bcma/sprom.c
drivers/bluetooth/btmrvl_sdio.c
drivers/bluetooth/btusb.c
drivers/hid/hid-core.c
drivers/hid/hid-debug.c
drivers/hid/hid-steelseries.c
drivers/idle/intel_idle.c
drivers/md/dm-bufio.c
drivers/md/dm-cache-metadata.c
drivers/md/dm-cache-policy.h
drivers/md/dm-cache-target.c
drivers/md/dm-mpath.c
drivers/md/dm-snap.c
drivers/md/dm-stripe.c
drivers/md/dm-table.c
drivers/md/dm-thin-metadata.c
drivers/md/dm-thin-metadata.h
drivers/md/dm-thin.c
drivers/md/persistent-data/dm-space-map-disk.c
drivers/md/persistent-data/dm-space-map-metadata.c
drivers/md/persistent-data/dm-space-map.h
drivers/net/wireless/Kconfig
drivers/net/wireless/Makefile
drivers/net/wireless/ath/Kconfig
drivers/net/wireless/ath/Makefile
drivers/net/wireless/ath/ath.h
drivers/net/wireless/ath/ath10k/Kconfig [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/Makefile [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/bmi.c [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/bmi.h [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/ce.c [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/ce.h [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/core.c [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/core.h [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/debug.c [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/debug.h [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/hif.h [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/htc.c [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/htc.h [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/htt.c [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/htt.h [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/htt_rx.c [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/htt_tx.c [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/hw.h [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/mac.c [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/mac.h [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/pci.c [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/pci.h [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/rx_desc.h [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/targaddrs.h [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/trace.c [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/trace.h [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/txrx.c [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/txrx.h [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/wmi.c [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/wmi.h [new file with mode: 0644]
drivers/net/wireless/ath/ath5k/base.c
drivers/net/wireless/ath/ath5k/base.h
drivers/net/wireless/ath/ath5k/mac80211-ops.c
drivers/net/wireless/ath/ath6kl/cfg80211.c
drivers/net/wireless/ath/ath6kl/debug.c
drivers/net/wireless/ath/ath6kl/init.c
drivers/net/wireless/ath/ath6kl/sdio.c
drivers/net/wireless/ath/ath6kl/usb.c
drivers/net/wireless/ath/ath9k/Kconfig
drivers/net/wireless/ath/ath9k/ani.c
drivers/net/wireless/ath/ath9k/ani.h
drivers/net/wireless/ath/ath9k/ar5008_phy.c
drivers/net/wireless/ath/ath9k/ar9002_hw.c
drivers/net/wireless/ath/ath9k/ar9002_initvals.h
drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
drivers/net/wireless/ath/ath9k/ar9003_hw.c
drivers/net/wireless/ath/ath9k/ar9003_mac.c
drivers/net/wireless/ath/ath9k/ar9003_paprd.c
drivers/net/wireless/ath/ath9k/ar9003_phy.c
drivers/net/wireless/ath/ath9k/ar9003_phy.h
drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h [new file with mode: 0644]
drivers/net/wireless/ath/ath9k/ath9k.h
drivers/net/wireless/ath/ath9k/beacon.c
drivers/net/wireless/ath/ath9k/calib.c
drivers/net/wireless/ath/ath9k/debug.c
drivers/net/wireless/ath/ath9k/debug.h
drivers/net/wireless/ath/ath9k/hif_usb.c
drivers/net/wireless/ath/ath9k/htc.h
drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
drivers/net/wireless/ath/ath9k/htc_drv_debug.c
drivers/net/wireless/ath/ath9k/htc_drv_init.c
drivers/net/wireless/ath/ath9k/htc_drv_main.c
drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/hw.h
drivers/net/wireless/ath/ath9k/init.c
drivers/net/wireless/ath/ath9k/link.c
drivers/net/wireless/ath/ath9k/mac.c
drivers/net/wireless/ath/ath9k/mac.h
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/pci.c
drivers/net/wireless/ath/ath9k/recv.c
drivers/net/wireless/ath/ath9k/reg.h
drivers/net/wireless/ath/ath9k/wow.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/carl9170/carl9170.h
drivers/net/wireless/ath/carl9170/main.c
drivers/net/wireless/ath/carl9170/tx.c
drivers/net/wireless/ath/regd.c
drivers/net/wireless/ath/wil6210/Kconfig
drivers/net/wireless/ath/wil6210/Makefile
drivers/net/wireless/ath/wil6210/cfg80211.c
drivers/net/wireless/ath/wil6210/debug.c [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/debugfs.c
drivers/net/wireless/ath/wil6210/interrupt.c
drivers/net/wireless/ath/wil6210/main.c
drivers/net/wireless/ath/wil6210/netdev.c
drivers/net/wireless/ath/wil6210/trace.c [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/trace.h [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/txrx.c
drivers/net/wireless/ath/wil6210/txrx.h
drivers/net/wireless/ath/wil6210/wil6210.h
drivers/net/wireless/ath/wil6210/wmi.c
drivers/net/wireless/b43/Kconfig
drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
drivers/net/wireless/brcm80211/brcmfmac/dhd.h
drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
drivers/net/wireless/brcm80211/brcmfmac/fweh.h
drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h
drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
drivers/net/wireless/brcm80211/brcmfmac/usb.c
drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
drivers/net/wireless/cw1200/Kconfig [new file with mode: 0644]
drivers/net/wireless/cw1200/Makefile [new file with mode: 0644]
drivers/net/wireless/cw1200/bh.c [new file with mode: 0644]
drivers/net/wireless/cw1200/bh.h [new file with mode: 0644]
drivers/net/wireless/cw1200/cw1200.h [new file with mode: 0644]
drivers/net/wireless/cw1200/cw1200_sdio.c [new file with mode: 0644]
drivers/net/wireless/cw1200/cw1200_spi.c [new file with mode: 0644]
drivers/net/wireless/cw1200/debug.c [new file with mode: 0644]
drivers/net/wireless/cw1200/debug.h [new file with mode: 0644]
drivers/net/wireless/cw1200/fwio.c [new file with mode: 0644]
drivers/net/wireless/cw1200/fwio.h [new file with mode: 0644]
drivers/net/wireless/cw1200/hwbus.h [new file with mode: 0644]
drivers/net/wireless/cw1200/hwio.c [new file with mode: 0644]
drivers/net/wireless/cw1200/hwio.h [new file with mode: 0644]
drivers/net/wireless/cw1200/main.c [new file with mode: 0644]
drivers/net/wireless/cw1200/pm.c [new file with mode: 0644]
drivers/net/wireless/cw1200/pm.h [new file with mode: 0644]
drivers/net/wireless/cw1200/queue.c [new file with mode: 0644]
drivers/net/wireless/cw1200/queue.h [new file with mode: 0644]
drivers/net/wireless/cw1200/scan.c [new file with mode: 0644]
drivers/net/wireless/cw1200/scan.h [new file with mode: 0644]
drivers/net/wireless/cw1200/sta.c [new file with mode: 0644]
drivers/net/wireless/cw1200/sta.h [new file with mode: 0644]
drivers/net/wireless/cw1200/txrx.c [new file with mode: 0644]
drivers/net/wireless/cw1200/txrx.h [new file with mode: 0644]
drivers/net/wireless/cw1200/wsm.c [new file with mode: 0644]
drivers/net/wireless/cw1200/wsm.h [new file with mode: 0644]
drivers/net/wireless/ipw2x00/ipw2100.c
drivers/net/wireless/ipw2x00/ipw2200.c
drivers/net/wireless/ipw2x00/libipw_rx.c
drivers/net/wireless/iwlegacy/3945-mac.c
drivers/net/wireless/iwlegacy/3945.c
drivers/net/wireless/iwlegacy/4965-mac.c
drivers/net/wireless/iwlegacy/commands.h
drivers/net/wireless/iwlegacy/common.c
drivers/net/wireless/iwlegacy/common.h
drivers/net/wireless/iwlwifi/Makefile
drivers/net/wireless/iwlwifi/dvm/agn.h
drivers/net/wireless/iwlwifi/dvm/calib.c
drivers/net/wireless/iwlwifi/dvm/commands.h
drivers/net/wireless/iwlwifi/dvm/dev.h
drivers/net/wireless/iwlwifi/dvm/devices.c
drivers/net/wireless/iwlwifi/dvm/lib.c
drivers/net/wireless/iwlwifi/dvm/mac80211.c
drivers/net/wireless/iwlwifi/dvm/main.c
drivers/net/wireless/iwlwifi/dvm/power.c
drivers/net/wireless/iwlwifi/dvm/rs.c
drivers/net/wireless/iwlwifi/dvm/rx.c
drivers/net/wireless/iwlwifi/dvm/scan.c
drivers/net/wireless/iwlwifi/dvm/tt.c
drivers/net/wireless/iwlwifi/dvm/tx.c
drivers/net/wireless/iwlwifi/dvm/ucode.c
drivers/net/wireless/iwlwifi/iwl-1000.c
drivers/net/wireless/iwlwifi/iwl-2000.c
drivers/net/wireless/iwlwifi/iwl-5000.c
drivers/net/wireless/iwlwifi/iwl-6000.c
drivers/net/wireless/iwlwifi/iwl-7000.c
drivers/net/wireless/iwlwifi/iwl-config.h
drivers/net/wireless/iwlwifi/iwl-csr.h
drivers/net/wireless/iwlwifi/iwl-debug.h
drivers/net/wireless/iwlwifi/iwl-drv.c
drivers/net/wireless/iwlwifi/iwl-drv.h
drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
drivers/net/wireless/iwlwifi/iwl-fw.h
drivers/net/wireless/iwlwifi/iwl-modparams.h
drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
drivers/net/wireless/iwlwifi/iwl-phy-db.c
drivers/net/wireless/iwlwifi/iwl-prph.h
drivers/net/wireless/iwlwifi/iwl-trans.h
drivers/net/wireless/iwlwifi/mvm/Makefile
drivers/net/wireless/iwlwifi/mvm/bt-coex.c
drivers/net/wireless/iwlwifi/mvm/d3.c
drivers/net/wireless/iwlwifi/mvm/debugfs.c
drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
drivers/net/wireless/iwlwifi/mvm/fw-api.h
drivers/net/wireless/iwlwifi/mvm/fw.c
drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/mvm.h
drivers/net/wireless/iwlwifi/mvm/nvm.c
drivers/net/wireless/iwlwifi/mvm/ops.c
drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
drivers/net/wireless/iwlwifi/mvm/power.c
drivers/net/wireless/iwlwifi/mvm/quota.c
drivers/net/wireless/iwlwifi/mvm/rs.c
drivers/net/wireless/iwlwifi/mvm/rs.h
drivers/net/wireless/iwlwifi/mvm/rx.c
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/mvm/sta.c
drivers/net/wireless/iwlwifi/mvm/sta.h
drivers/net/wireless/iwlwifi/mvm/tt.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/tx.c
drivers/net/wireless/iwlwifi/mvm/utils.c
drivers/net/wireless/iwlwifi/pcie/drv.c
drivers/net/wireless/iwlwifi/pcie/internal.h
drivers/net/wireless/iwlwifi/pcie/rx.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/iwlwifi/pcie/tx.c
drivers/net/wireless/mwifiex/11h.c [new file with mode: 0644]
drivers/net/wireless/mwifiex/Kconfig
drivers/net/wireless/mwifiex/Makefile
drivers/net/wireless/mwifiex/cfg80211.c
drivers/net/wireless/mwifiex/cmdevt.c
drivers/net/wireless/mwifiex/fw.h
drivers/net/wireless/mwifiex/init.c
drivers/net/wireless/mwifiex/join.c
drivers/net/wireless/mwifiex/main.c
drivers/net/wireless/mwifiex/main.h
drivers/net/wireless/mwifiex/scan.c
drivers/net/wireless/mwifiex/sdio.c
drivers/net/wireless/mwifiex/sdio.h
drivers/net/wireless/mwifiex/sta_cmd.c
drivers/net/wireless/mwifiex/sta_cmdresp.c
drivers/net/wireless/mwifiex/sta_event.c
drivers/net/wireless/mwifiex/sta_ioctl.c
drivers/net/wireless/mwifiex/uap_cmd.c
drivers/net/wireless/mwifiex/uap_event.c
drivers/net/wireless/mwifiex/wmm.c
drivers/net/wireless/mwl8k.c
drivers/net/wireless/orinoco/orinoco_usb.c
drivers/net/wireless/p54/p54spi.c
drivers/net/wireless/rt2x00/rt2400pci.c
drivers/net/wireless/rt2x00/rt2500pci.c
drivers/net/wireless/rt2x00/rt2500usb.c
drivers/net/wireless/rt2x00/rt2800.h
drivers/net/wireless/rt2x00/rt2800lib.c
drivers/net/wireless/rt2x00/rt2800pci.c
drivers/net/wireless/rt2x00/rt2800usb.c
drivers/net/wireless/rt2x00/rt2x00.h
drivers/net/wireless/rt2x00/rt2x00dev.c
drivers/net/wireless/rt2x00/rt2x00pci.c
drivers/net/wireless/rt2x00/rt2x00queue.c
drivers/net/wireless/rt2x00/rt2x00queue.h
drivers/net/wireless/rt2x00/rt61pci.c
drivers/net/wireless/rt2x00/rt73usb.c
drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
drivers/net/wireless/rtlwifi/rtl8192de/dm.c
drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
drivers/net/wireless/ti/wl1251/spi.c
drivers/net/wireless/ti/wl18xx/main.c
drivers/net/wireless/ti/wl18xx/reg.h
drivers/net/wireless/ti/wlcore/Makefile
drivers/net/wireless/ti/wlcore/main.c
drivers/net/wireless/ti/wlcore/ps.c
drivers/net/wireless/ti/wlcore/spi.c
drivers/net/wireless/ti/wlcore/sysfs.c [new file with mode: 0644]
drivers/net/wireless/ti/wlcore/sysfs.h [new file with mode: 0644]
drivers/net/wireless/ti/wlcore/tx.c
drivers/nfc/Kconfig
drivers/nfc/Makefile
drivers/nfc/mei_phy.c
drivers/nfc/microread/microread.c
drivers/nfc/nfcsim.c [new file with mode: 0644]
drivers/nfc/nfcwilink.c
drivers/nfc/pn533.c
drivers/nfc/pn544/pn544.c
drivers/pcmcia/m8xx_pcmcia.c
drivers/platform/x86/Kconfig
drivers/platform/x86/Makefile
drivers/platform/x86/asus-nb-wmi.c
drivers/platform/x86/dell-laptop.c
drivers/platform/x86/dell-wmi-aio.c
drivers/platform/x86/hp-wmi.c
drivers/platform/x86/hp_accel.c
drivers/platform/x86/ideapad-laptop.c
drivers/platform/x86/pvpanic.c [new file with mode: 0644]
drivers/platform/x86/samsung-q10.c
drivers/platform/x86/sony-laptop.c
drivers/scsi/Kconfig
drivers/scsi/aic94xx/aic94xx_dev.c
drivers/scsi/aic94xx/aic94xx_hwi.c
drivers/scsi/aic94xx/aic94xx_tmf.c
drivers/scsi/be2iscsi/be.h
drivers/scsi/be2iscsi/be_cmds.c
drivers/scsi/be2iscsi/be_cmds.h
drivers/scsi/be2iscsi/be_iscsi.c
drivers/scsi/be2iscsi/be_iscsi.h
drivers/scsi/be2iscsi/be_main.c
drivers/scsi/be2iscsi/be_main.h
drivers/scsi/be2iscsi/be_mgmt.c
drivers/scsi/be2iscsi/be_mgmt.h
drivers/scsi/bnx2fc/bnx2fc.h
drivers/scsi/bnx2fc/bnx2fc_els.c
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/bnx2fc/bnx2fc_hwi.c
drivers/scsi/bnx2fc/bnx2fc_io.c
drivers/scsi/bnx2fc/bnx2fc_tgt.c
drivers/scsi/csiostor/csio_lnode.h
drivers/scsi/csiostor/csio_rnode.h
drivers/scsi/fnic/fnic.h
drivers/scsi/fnic/fnic_fcs.c
drivers/scsi/fnic/fnic_fip.h [new file with mode: 0644]
drivers/scsi/fnic/fnic_main.c
drivers/scsi/fnic/vnic_dev.c
drivers/scsi/fnic/vnic_dev.h
drivers/scsi/fnic/vnic_devcmd.h
drivers/scsi/ibmvscsi/ibmvfc.c
drivers/scsi/ibmvscsi/ibmvfc.h
drivers/scsi/ipr.c
drivers/scsi/ipr.h
drivers/scsi/isci/remote_device.c
drivers/scsi/isci/remote_device.h
drivers/scsi/isci/request.c
drivers/scsi/isci/task.c
drivers/scsi/libsas/sas_ata.c
drivers/scsi/libsas/sas_discover.c
drivers/scsi/libsas/sas_expander.c
drivers/scsi/libsas/sas_internal.h
drivers/scsi/libsas/sas_port.c
drivers/scsi/lpfc/lpfc.h
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_bsg.c
drivers/scsi/lpfc/lpfc_crtn.h
drivers/scsi/lpfc/lpfc_ct.c
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/lpfc/lpfc_hbadisc.c
drivers/scsi/lpfc/lpfc_hw.h
drivers/scsi/lpfc/lpfc_hw4.h
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_logmsg.h
drivers/scsi/lpfc/lpfc_mbox.c
drivers/scsi/lpfc/lpfc_mem.c
drivers/scsi/lpfc/lpfc_nportdisc.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/lpfc/lpfc_sli4.h
drivers/scsi/lpfc/lpfc_version.h
drivers/scsi/lpfc/lpfc_vport.c
drivers/scsi/lpfc/lpfc_vport.h
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/mvsas/mv_init.c
drivers/scsi/mvsas/mv_sas.c
drivers/scsi/mvsas/mv_sas.h
drivers/scsi/pm8001/Makefile
drivers/scsi/pm8001/pm8001_ctl.c
drivers/scsi/pm8001/pm8001_defs.h
drivers/scsi/pm8001/pm8001_hwi.c
drivers/scsi/pm8001/pm8001_hwi.h
drivers/scsi/pm8001/pm8001_init.c
drivers/scsi/pm8001/pm8001_sas.c
drivers/scsi/pm8001/pm8001_sas.h
drivers/scsi/pm8001/pm80xx_hwi.c [new file with mode: 0644]
drivers/scsi/pm8001/pm80xx_hwi.h [new file with mode: 0644]
drivers/scsi/qla2xxx/Kconfig
drivers/scsi/qla2xxx/qla_mr.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla4xxx/ql4_iocb.c
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/qla4xxx/ql4_version.h
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_pm.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/sd.c
drivers/scsi/sd.h
drivers/scsi/sd_dif.c
drivers/scsi/ufs/Kconfig
drivers/scsi/ufs/Makefile
drivers/scsi/ufs/ufshcd-pltfrm.c [new file with mode: 0644]
drivers/scsi/ufs/ufshcd.c
drivers/ssb/driver_chipcommon_sflash.c
drivers/ssb/main.c
drivers/ssb/ssb_private.h
drivers/tty/serial/68328serial.c
drivers/tty/serial/bcm63xx_uart.c
drivers/tty/tty_audit.c
drivers/video/au1100fb.c
drivers/xen/Kconfig
drivers/xen/events.c
fs/ecryptfs/crypto.c
fs/ecryptfs/ecryptfs_kernel.h
fs/namei.c
fs/nfsd/nfs4proc.c
fs/nfsd/nfs4recover.c
fs/notify/fanotify/fanotify_user.c
fs/romfs/mmap-nommu.c
include/linux/audit.h
include/linux/compat.h
include/linux/cpuidle.h
include/linux/device-mapper.h
include/linux/ftrace.h
include/linux/ftrace_event.h
include/linux/hid.h
include/linux/ieee80211.h
include/linux/pci_ids.h
include/linux/platform_data/net-cw1200.h [new file with mode: 0644]
include/linux/sched.h
include/linux/ssb/ssb_driver_mips.h
include/linux/ssb/ssb_regs.h
include/linux/tty.h
include/net/bluetooth/hci.h
include/net/bluetooth/hci_core.h
include/net/bluetooth/l2cap.h
include/net/cfg80211.h
include/net/ieee80211_radiotap.h
include/net/mac80211.h
include/net/nfc/hci.h
include/net/nfc/nci_core.h
include/net/nfc/nfc.h
include/scsi/libsas.h
include/scsi/osd_protocol.h
include/scsi/sas.h
include/scsi/sas_ata.h
include/scsi/scsi_device.h
include/scsi/scsi_transport_iscsi.h
include/scsi/scsi_transport_sas.h
include/sound/tlv.h
include/uapi/linux/audit.h
include/uapi/linux/nfc.h
include/uapi/linux/nl80211.h
kernel/audit.c
kernel/audit.h
kernel/auditfilter.c
kernel/auditsc.c
kernel/params.c
kernel/sys_ni.c
kernel/sysctl_binary.c
kernel/trace/Kconfig
kernel/trace/ftrace.c
kernel/trace/trace_events.c
kernel/trace/trace_kprobe.c
net/bluetooth/hci_core.c
net/bluetooth/hci_event.c
net/bluetooth/hidp/core.c
net/bluetooth/l2cap_core.c
net/bluetooth/l2cap_sock.c
net/bluetooth/mgmt.c
net/mac80211/aes_ccm.c
net/mac80211/cfg.c
net/mac80211/debugfs_netdev.c
net/mac80211/driver-ops.h
net/mac80211/ht.c
net/mac80211/ibss.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/key.c
net/mac80211/key.h
net/mac80211/main.c
net/mac80211/mesh.c
net/mac80211/mesh.h
net/mac80211/mesh_plink.c
net/mac80211/mlme.c
net/mac80211/rate.c
net/mac80211/rx.c
net/mac80211/scan.c
net/mac80211/sta_info.c
net/mac80211/sta_info.h
net/mac80211/tx.c
net/mac80211/util.c
net/mac80211/vht.c
net/mac80211/wep.c
net/mac80211/wpa.c
net/nfc/core.c
net/nfc/hci/core.c
net/nfc/llcp.h
net/nfc/llcp_commands.c
net/nfc/llcp_core.c
net/nfc/llcp_sock.c
net/nfc/nci/Kconfig
net/nfc/nci/Makefile
net/nfc/nci/core.c
net/nfc/nci/data.c
net/nfc/nci/spi.c [new file with mode: 0644]
net/nfc/netlink.c
net/nfc/nfc.h
net/socket.c
net/sunrpc/auth_gss/gss_rpc_xdr.c
net/wireless/chan.c
net/wireless/core.c
net/wireless/core.h
net/wireless/debugfs.c
net/wireless/ibss.c
net/wireless/mesh.c
net/wireless/mlme.c
net/wireless/nl80211.c
net/wireless/reg.c
net/wireless/scan.c
net/wireless/sme.c
net/wireless/sysfs.c
net/wireless/trace.h
net/wireless/util.c
net/wireless/wext-compat.c
net/wireless/wext-sme.c
sound/atmel/abdac.c
sound/atmel/ac97c.c
sound/mips/hal2.c
sound/mips/sgio2audio.c
sound/oss/Kconfig
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_hdmi.c
sound/soc/codecs/wm8994.c
sound/soc/davinci/davinci-mcasp.c
sound/soc/soc-dapm.c
tools/power/x86/turbostat/turbostat.c
virt/kvm/kvm_main.c

index 0f6a3edcd44b5c35b0bc2fa00560a0f942926c05..49267ea975684391c1f1e7144fb0c88700557d66 100644 (file)
 !Finclude/net/cfg80211.h cfg80211_ibss_params
 !Finclude/net/cfg80211.h cfg80211_connect_params
 !Finclude/net/cfg80211.h cfg80211_pmksa
-!Finclude/net/cfg80211.h cfg80211_send_rx_auth
-!Finclude/net/cfg80211.h cfg80211_send_auth_timeout
-!Finclude/net/cfg80211.h cfg80211_send_rx_assoc
-!Finclude/net/cfg80211.h cfg80211_send_assoc_timeout
-!Finclude/net/cfg80211.h cfg80211_send_deauth
-!Finclude/net/cfg80211.h __cfg80211_send_deauth
-!Finclude/net/cfg80211.h cfg80211_send_disassoc
-!Finclude/net/cfg80211.h __cfg80211_send_disassoc
+!Finclude/net/cfg80211.h cfg80211_rx_mlme_mgmt
+!Finclude/net/cfg80211.h cfg80211_auth_timeout
+!Finclude/net/cfg80211.h cfg80211_rx_assoc_resp
+!Finclude/net/cfg80211.h cfg80211_assoc_timeout
+!Finclude/net/cfg80211.h cfg80211_tx_mlme_mgmt
 !Finclude/net/cfg80211.h cfg80211_ibss_joined
 !Finclude/net/cfg80211.h cfg80211_connect_result
 !Finclude/net/cfg80211.h cfg80211_roamed
diff --git a/Documentation/devicetree/bindings/mips/ralink.txt b/Documentation/devicetree/bindings/mips/ralink.txt
new file mode 100644 (file)
index 0000000..b35a8d0
--- /dev/null
@@ -0,0 +1,17 @@
+Ralink MIPS SoC device tree bindings
+
+1. SoCs
+
+Each device tree must specify a compatible value for the Ralink SoC
+it uses in the compatible property of the root node. The compatible
+value must be one of the following values:
+
+  ralink,rt2880-soc
+  ralink,rt3050-soc
+  ralink,rt3052-soc
+  ralink,rt3350-soc
+  ralink,rt3352-soc
+  ralink,rt3883-soc
+  ralink,rt5350-soc
+  ralink,mt7620a-soc
+  ralink,mt7620n-soc
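
A minimal sketch of how platform code might key off one of these compatible values, assuming only the generic OF helper of_machine_is_compatible(); the soc_is_rt305x() wrapper is hypothetical and not part of this merge:

#include <linux/types.h>
#include <linux/of.h>

/* Hypothetical helper: true when the root node declares an RT305x-family SoC */
static bool soc_is_rt305x(void)
{
	return of_machine_is_compatible("ralink,rt3050-soc") ||
	       of_machine_is_compatible("ralink,rt3052-soc") ||
	       of_machine_is_compatible("ralink,rt3350-soc") ||
	       of_machine_is_compatible("ralink,rt3352-soc");
}
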
index 4d1919bf23322ac2209eb3046bfce404c5ced483..6931c4348d240ed9f8bf6b21a0d75f9c520edf1d 100644 (file)
@@ -42,6 +42,7 @@ onnn  ON Semiconductor Corp.
 picochip       Picochip Ltd
 powervr        PowerVR (deprecated, use img)
 qcom   Qualcomm, Inc.
+ralink Mediatek/Ralink Technology Corp.
 ramtron        Ramtron International
 realtek Realtek Semiconductor Corp.
 renesas        Renesas Electronics Corporation
index 3d7782b9f90d80d171c97e1d15371a9e2ec51ddc..e5069c22339197fd3fad9d25518aa6be971b5c03 100644 (file)
@@ -2299,6 +2299,11 @@ M:       Jaya Kumar <jayakumar.alsa@gmail.com>
 S:     Maintained
 F:     sound/pci/cs5535audio/
 
+CW1200 WLAN driver
+M:     Solomon Peachy <pizza@shaftnet.org>
+S:     Maintained
+F:     drivers/net/wireless/cw1200/
+
 CX18 VIDEO4LINUX DRIVER
 M:     Andy Walls <awalls@md.metrocast.net>
 L:     ivtv-devel@ivtvdriver.org (moderated for non-subscribers)
index a3a834b11a97ef7d9236a56e7b5e7ca78e9799b8..cd11e88576044e127cb73f49bd41c7d4fb5d3e9f 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
-PATCHLEVEL = 9
+PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc1
 NAME = Unicycling Gorilla
 
 # *DOCUMENTATION*
index 491ae7923b10dd7ee8e156b53ee5bdb16edc1c30..5917099470eaf74d259051b4f7451683cc52901a 100644 (file)
@@ -182,6 +182,10 @@ config ARC_CACHE_PAGES
          Note that Global I/D ENABLE + Per Page DISABLE works but corollary
          Global DISABLE + Per Page ENABLE won't work
 
+config ARC_CACHE_VIPT_ALIASING
+       bool "Support VIPT Aliasing D$"
+       default n
+
 endif  #ARC_CACHE
 
 config ARC_HAS_ICCM
index 48af742f8b5a7f5d5078447a639730263b70771a..d8dd660898b9b214927c903e1198e36026513984 100644 (file)
@@ -32,7 +32,6 @@ generic-y += resource.h
 generic-y += scatterlist.h
 generic-y += sembuf.h
 generic-y += shmbuf.h
-generic-y += shmparam.h
 generic-y += siginfo.h
 generic-y += socket.h
 generic-y += sockios.h
index 6632273861fd205ce2bf25ed2d48b1e26f8f78ed..d5555fe4742a3f696c7c8ef72d6eae757b51469e 100644 (file)
@@ -55,9 +55,6 @@
        : "r"(data), "r"(ptr));         \
 })
 
-/* used to give SHMLBA a value to avoid Cache Aliasing */
-extern unsigned int ARC_shmlba;
-
 #define ARCH_DMA_MINALIGN      L1_CACHE_BYTES
 
 /*
index ee1f6eae82d2f6afa9a85b78a19d06602e94f862..9f841af41092f059a604c0631984bb3c6ef067d7 100644 (file)
@@ -19,6 +19,7 @@
 #define _ASM_CACHEFLUSH_H
 
 #include <linux/mm.h>
+#include <asm/shmparam.h>
 
 /*
  * Semantically we need this because icache doesn't snoop dcache/dma.
@@ -33,7 +34,9 @@ void flush_cache_all(void);
 void flush_icache_range(unsigned long start, unsigned long end);
 void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len);
 void __inv_icache_page(unsigned long paddr, unsigned long vaddr);
-void __flush_dcache_page(unsigned long paddr);
+void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr);
+#define __flush_dcache_page(p, v)      \
+               ___flush_dcache_page((unsigned long)p, (unsigned long)v)
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 
@@ -50,18 +53,55 @@ void dma_cache_wback(unsigned long start, unsigned long sz);
 #define flush_cache_vmap(start, end)           flush_cache_all()
 #define flush_cache_vunmap(start, end)         flush_cache_all()
 
-/*
- * VM callbacks when entire/range of user-space V-P mappings are
- * torn-down/get-invalidated
- *
- * Currently we don't support D$ aliasing configs for our VIPT caches
- * NOPS for VIPT Cache with non-aliasing D$ configurations only
- */
-#define flush_cache_dup_mm(mm)                 /* called on fork */
+#define flush_cache_dup_mm(mm)                 /* called on fork (VIVT only) */
+
+#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
+
 #define flush_cache_mm(mm)                     /* called on munmap/exit */
 #define flush_cache_range(mm, u_vstart, u_vend)
 #define flush_cache_page(vma, u_vaddr, pfn)    /* PF handling/COW-break */
 
+#else  /* VIPT aliasing dcache */
+
+/* To clear out stale userspace mappings */
+void flush_cache_mm(struct mm_struct *mm);
+void flush_cache_range(struct vm_area_struct *vma,
+       unsigned long start,unsigned long end);
+void flush_cache_page(struct vm_area_struct *vma,
+       unsigned long user_addr, unsigned long page);
+
+/*
+ * To make sure that userspace mapping is flushed to memory before
+ * get_user_pages() uses a kernel mapping to access the page
+ */
+#define ARCH_HAS_FLUSH_ANON_PAGE
+void flush_anon_page(struct vm_area_struct *vma,
+       struct page *page, unsigned long u_vaddr);
+
+#endif /* CONFIG_ARC_CACHE_VIPT_ALIASING */
+
+/*
+ * Simple wrapper over config option
+ * Bootup code ensures that hardware matches kernel configuration
+ */
+static inline int cache_is_vipt_aliasing(void)
+{
+#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING
+       return 1;
+#else
+       return 0;
+#endif
+}
+
+#define CACHE_COLOR(addr)      (((unsigned long)(addr) >> (PAGE_SHIFT)) & 3)
+
+/*
+ * checks if two addresses (after page aligning) index into same cache set
+ */
+#define addr_not_cache_congruent(addr1, addr2)                         \
+       cache_is_vipt_aliasing() ?                                      \
+               (CACHE_COLOR(addr1) != CACHE_COLOR(addr2)) : 0          \
+
 #define copy_to_user_page(vma, page, vaddr, dst, src, len)             \
 do {                                                                   \
        memcpy(dst, src, len);                                          \
index bdf54610455134d1af10a2adad2952f1928b788c..374a35514116c17a71a610c2959e90d213e7ed97 100644 (file)
 #define get_user_page(vaddr)           __get_free_page(GFP_KERNEL)
 #define free_user_page(page, addr)     free_page(addr)
 
-/* TBD: for now don't worry about VIPT D$ aliasing */
 #define clear_page(paddr)              memset((paddr), 0, PAGE_SIZE)
 #define copy_page(to, from)            memcpy((to), (from), PAGE_SIZE)
 
+#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
+
 #define clear_user_page(addr, vaddr, pg)       clear_page(addr)
 #define copy_user_page(vto, vfrom, vaddr, pg)  copy_page(vto, vfrom)
 
+#else  /* VIPT aliasing dcache */
+
+struct vm_area_struct;
+struct page;
+
+#define __HAVE_ARCH_COPY_USER_HIGHPAGE
+
+void copy_user_highpage(struct page *to, struct page *from,
+                       unsigned long u_vaddr, struct vm_area_struct *vma);
+void clear_user_page(void *to, unsigned long u_vaddr, struct page *page);
+
+#endif /* CONFIG_ARC_CACHE_VIPT_ALIASING */
+
 #undef STRICT_MM_TYPECHECKS
 
 #ifdef STRICT_MM_TYPECHECKS
index b7e36684c091d1f236d678007e998c5783b964bf..1cc4720faccbecf7862253720988f54e9b43751a 100644 (file)
@@ -395,6 +395,9 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 
 #include <asm-generic/pgtable.h>
 
+/* to cope with aliasing VIPT cache */
+#define HAVE_ARCH_UNMAPPED_AREA
+
 /*
  * No page table caches to initialise
  */
diff --git a/arch/arc/include/asm/shmparam.h b/arch/arc/include/asm/shmparam.h
new file mode 100644 (file)
index 0000000..fffeecc
--- /dev/null
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_ASM_SHMPARAM_H
+#define __ARC_ASM_SHMPARAM_H
+
+/* Handle up to 2 cache bins */
+#define        SHMLBA  (2 * PAGE_SIZE)
+
+/* Enforce SHMLBA in shmat */
+#define __ARCH_FORCE_SHMLBA
+
+#endif
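
The two-bin figure follows from the aliasing arithmetic used elsewhere in this merge (the cache_arc700.c hunk below treats the D$ as aliasing when dc->sz / ARC_DCACHE_WAYS exceeds PAGE_SIZE). A standalone sketch of that arithmetic, using 32 KB / 4-way / 4 KB pages purely as assumed example figures:

#include <stdio.h>

int main(void)
{
	/* Assumed example geometry, not probed from hardware */
	unsigned long dcache_sz = 32 * 1024;	/* total D$ size  */
	unsigned long ways      = 4;		/* associativity  */
	unsigned long page_sz   = 4 * 1024;	/* PAGE_SIZE      */

	unsigned long way_sz  = dcache_sz / ways;	/* bytes indexed per way */
	unsigned long colours = way_sz / page_sz;	/* pages per way = bins  */

	printf("aliasing: %s  colours: %lu  SHMLBA: %lu\n",
	       way_sz > page_sz ? "yes" : "no", colours, colours * page_sz);
	return 0;
}

With these figures each way spans two pages, so two colours exist and SHMLBA works out to 2 * PAGE_SIZE, matching the definition above.
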
index fe91719866a57fed68e5ddb53228e7fec61279b1..85b6df839bd7b93b11465c066bdd063b32e68b77 100644 (file)
@@ -30,13 +30,20 @@ do {                                                \
 /*
  * This pair is called at time of munmap/exit to flush cache and TLB entries
  * for mappings being torn down.
- * 1) cache-flush part -implemented via tlb_start_vma( ) can be NOP (for now)
- *    as we don't support aliasing configs in our VIPT D$.
+ * 1) cache-flush part -implemented via tlb_start_vma( ) for VIPT aliasing D$
 * 2) tlb-flush part - implemented via tlb_end_vma( ) flushes the TLB range
  *
  * Note, read http://lkml.org/lkml/2004/1/15/6
  */
+#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
 #define tlb_start_vma(tlb, vma)
+#else
+#define tlb_start_vma(tlb, vma)                                                \
+do {                                                                   \
+       if (!tlb->fullmm)                                               \
+               flush_cache_range(vma, vma->vm_start, vma->vm_end);     \
+} while(0)
+#endif
 
 #define tlb_end_vma(tlb, vma)                                          \
 do {                                                                   \
index 168dc146a8f6e33f6bf0e5bb83ca6cf9da1511af..ac95cc239c1e47d3d2f5a133b1d66936c6af0b91 100644 (file)
@@ -7,4 +7,4 @@
 #
 
 obj-y  := extable.o ioremap.o dma.o fault.o init.o
-obj-y  += tlb.o tlbex.o cache_arc700.o
+obj-y  += tlb.o tlbex.o cache_arc700.o mmap.o
index c854cf95f70666669dcff0eec6006bbbe788bab0..2f12bca8aef30c4155b21e514e3ba9dd5ec63468 100644 (file)
@@ -68,6 +68,7 @@
 #include <linux/mmu_context.h>
 #include <linux/syscalls.h>
 #include <linux/uaccess.h>
+#include <linux/pagemap.h>
 #include <asm/cacheflush.h>
 #include <asm/cachectl.h>
 #include <asm/setup.h>
@@ -138,6 +139,7 @@ void __cpuinit arc_cache_init(void)
        struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
        struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
        int way_pg_ratio = way_pg_ratio;
+       int dcache_does_alias;
        char str[256];
 
        printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
@@ -184,9 +186,13 @@ chk_dc:
                panic("Cache H/W doesn't match kernel Config");
        }
 
+       dcache_does_alias = (dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE;
+
        /* check for D-Cache aliasing */
-       if ((dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE)
-               panic("D$ aliasing not handled right now\n");
+       if (dcache_does_alias && !cache_is_vipt_aliasing())
+               panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
+       else if (!dcache_does_alias && cache_is_vipt_aliasing())
+               panic("Don't need CONFIG_ARC_CACHE_VIPT_ALIASING\n");
 #endif
 
        /* Set the default Invalidate Mode to "simply discard dirty lines"
@@ -269,47 +275,57 @@ static inline void __dc_entire_op(const int cacheop)
  * Per Line Operation on D-Cache
  * Doesn't deal with type-of-op/IRQ-disabling/waiting-for-flush-to-complete
 * Its sole purpose is to help gcc generate ZOL
+ * (aliasing VIPT dcache flushing needs both vaddr and paddr)
  */
-static inline void __dc_line_loop(unsigned long start, unsigned long sz,
-                                         int aux_reg)
+static inline void __dc_line_loop(unsigned long paddr, unsigned long vaddr,
+                                 unsigned long sz, const int aux_reg)
 {
-       int num_lines, slack;
+       int num_lines;
 
        /* Ensure we properly floor/ceil the non-line aligned/sized requests
-        * and have @start - aligned to cache line and integral @num_lines.
+        * and have @paddr - aligned to cache line and integral @num_lines.
         * This however can be avoided for page sized since:
-        *  -@start will be cache-line aligned already (being page aligned)
+        *  -@paddr will be cache-line aligned already (being page aligned)
         *  -@sz will be integral multiple of line size (being page sized).
         */
        if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
-               slack = start & ~DCACHE_LINE_MASK;
-               sz += slack;
-               start -= slack;
+               sz += paddr & ~DCACHE_LINE_MASK;
+               paddr &= DCACHE_LINE_MASK;
+               vaddr &= DCACHE_LINE_MASK;
        }
 
        num_lines = DIV_ROUND_UP(sz, ARC_DCACHE_LINE_LEN);
 
+#if (CONFIG_ARC_MMU_VER <= 2)
+       paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
+#endif
+
        while (num_lines-- > 0) {
 #if (CONFIG_ARC_MMU_VER > 2)
                /*
                 * Just as for I$, in MMU v3, D$ ops also require
                 * "tag" bits in DC_PTAG, "index" bits in FLDL,IVDL ops
-                * But we pass phy addr for both. This works since Linux
-                * doesn't support aliasing configs for D$, yet.
-                * Thus paddr is enough to provide both tag and index.
                 */
-               write_aux_reg(ARC_REG_DC_PTAG, start);
+               write_aux_reg(ARC_REG_DC_PTAG, paddr);
+
+               write_aux_reg(aux_reg, vaddr);
+               vaddr += ARC_DCACHE_LINE_LEN;
+#else
+               /* paddr contains stuffed vaddrs bits */
+               write_aux_reg(aux_reg, paddr);
 #endif
-               write_aux_reg(aux_reg, start);
-               start += ARC_DCACHE_LINE_LEN;
+               paddr += ARC_DCACHE_LINE_LEN;
        }
 }
 
+/* For kernel mappings cache operation: index is same as paddr */
+#define __dc_line_op_k(p, sz, op)      __dc_line_op(p, p, sz, op)
+
 /*
  * D-Cache : Per Line INV (discard or wback+discard) or FLUSH (wback)
  */
-static inline void __dc_line_op(unsigned long start, unsigned long sz,
-                                       const int cacheop)
+static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
+                               unsigned long sz, const int cacheop)
 {
        unsigned long flags, tmp = tmp;
        int aux;
@@ -332,7 +348,7 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz,
        else
                aux = ARC_REG_DC_FLDL;
 
-       __dc_line_loop(start, sz, aux);
+       __dc_line_loop(paddr, vaddr, sz, aux);
 
        if (cacheop & OP_FLUSH) /* flush / flush-n-inv both wait */
                wait_for_flush();
@@ -347,7 +363,8 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz,
 #else
 
 #define __dc_entire_op(cacheop)
-#define __dc_line_op(start, sz, cacheop)
+#define __dc_line_op(paddr, vaddr, sz, cacheop)
+#define __dc_line_op_k(paddr, sz, cacheop)
 
 #endif /* CONFIG_ARC_HAS_DCACHE */
 
@@ -399,49 +416,45 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz,
 /***********************************************************
  * Machine specific helper for per line I-Cache invalidate.
  */
-static void __ic_line_inv_vaddr(unsigned long phy_start, unsigned long vaddr,
+static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
                                unsigned long sz)
 {
        unsigned long flags;
-       int num_lines, slack;
-       unsigned int addr;
+       int num_lines;
 
        /*
         * Ensure we properly floor/ceil the non-line aligned/sized requests:
         * However page sized flushes can be compile time optimised.
-        *  -@phy_start will be cache-line aligned already (being page aligned)
+        *  -@paddr will be cache-line aligned already (being page aligned)
         *  -@sz will be integral multiple of line size (being page sized).
         */
        if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
-               slack = phy_start & ~ICACHE_LINE_MASK;
-               sz += slack;
-               phy_start -= slack;
+               sz += paddr & ~ICACHE_LINE_MASK;
+               paddr &= ICACHE_LINE_MASK;
+               vaddr &= ICACHE_LINE_MASK;
        }
 
        num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN);
 
-#if (CONFIG_ARC_MMU_VER > 2)
-       vaddr &= ~ICACHE_LINE_MASK;
-       addr = phy_start;
-#else
+#if (CONFIG_ARC_MMU_VER <= 2)
        /* bits 17:13 of vaddr go as bits 4:0 of paddr */
-       addr = phy_start | ((vaddr >> 13) & 0x1F);
+       paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
 #endif
 
        local_irq_save(flags);
        while (num_lines-- > 0) {
 #if (CONFIG_ARC_MMU_VER > 2)
                /* tag comes from phy addr */
-               write_aux_reg(ARC_REG_IC_PTAG, addr);
+               write_aux_reg(ARC_REG_IC_PTAG, paddr);
 
                /* index bits come from vaddr */
                write_aux_reg(ARC_REG_IC_IVIL, vaddr);
                vaddr += ARC_ICACHE_LINE_LEN;
 #else
                /* paddr contains stuffed vaddrs bits */
-               write_aux_reg(ARC_REG_IC_IVIL, addr);
+               write_aux_reg(ARC_REG_IC_IVIL, paddr);
 #endif
-               addr += ARC_ICACHE_LINE_LEN;
+               paddr += ARC_ICACHE_LINE_LEN;
        }
        local_irq_restore(flags);
 }
@@ -457,29 +470,66 @@ static void __ic_line_inv_vaddr(unsigned long phy_start, unsigned long vaddr,
  * Exported APIs
  */
 
+/*
+ * Handle cache congruency of kernel and userspace mappings of page when kernel
+ * writes-to/reads-from
+ *
+ * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
+ *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
+ *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
+ *  -In SMP, if hardware caches are coherent
+ *
+ * There's a corollary case, where kernel READs from a userspace mapped page.
+ * If the U-mapping is not congruent to the K-mapping, the former needs flushing.
+ */
 void flush_dcache_page(struct page *page)
 {
-       /* Make a note that dcache is not yet flushed for this page */
-       set_bit(PG_arch_1, &page->flags);
+       struct address_space *mapping;
+
+       if (!cache_is_vipt_aliasing()) {
+               set_bit(PG_arch_1, &page->flags);
+               return;
+       }
+
+       /* don't handle anon pages here */
+       mapping = page_mapping(page);
+       if (!mapping)
+               return;
+
+       /*
+        * pagecache page, file not yet mapped to userspace
+        * Make a note that K-mapping is dirty
+        */
+       if (!mapping_mapped(mapping)) {
+               set_bit(PG_arch_1, &page->flags);
+       } else if (page_mapped(page)) {
+
+               /* kernel reading from page with U-mapping */
+               void *paddr = page_address(page);
+               unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;
+
+               if (addr_not_cache_congruent(paddr, vaddr))
+                       __flush_dcache_page(paddr, vaddr);
+       }
 }
 EXPORT_SYMBOL(flush_dcache_page);
 
 
 void dma_cache_wback_inv(unsigned long start, unsigned long sz)
 {
-       __dc_line_op(start, sz, OP_FLUSH_N_INV);
+       __dc_line_op_k(start, sz, OP_FLUSH_N_INV);
 }
 EXPORT_SYMBOL(dma_cache_wback_inv);
 
 void dma_cache_inv(unsigned long start, unsigned long sz)
 {
-       __dc_line_op(start, sz, OP_INV);
+       __dc_line_op_k(start, sz, OP_INV);
 }
 EXPORT_SYMBOL(dma_cache_inv);
 
 void dma_cache_wback(unsigned long start, unsigned long sz)
 {
-       __dc_line_op(start, sz, OP_FLUSH);
+       __dc_line_op_k(start, sz, OP_FLUSH);
 }
 EXPORT_SYMBOL(dma_cache_wback);
 
@@ -560,7 +610,7 @@ void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
 
        local_irq_save(flags);
        __ic_line_inv_vaddr(paddr, vaddr, len);
-       __dc_line_op(paddr, len, OP_FLUSH);
+       __dc_line_op(paddr, vaddr, len, OP_FLUSH);
        local_irq_restore(flags);
 }
 
@@ -570,9 +620,13 @@ void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
        __ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
 }
 
-void __flush_dcache_page(unsigned long paddr)
+/*
+ * wrapper to clear out kernel or userspace mappings of a page
+ * For kernel mappings @vaddr == @paddr
+ */
+void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr)
 {
-       __dc_line_op(paddr, PAGE_SIZE, OP_FLUSH_N_INV);
+       __dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
 }
 
 void flush_icache_all(void)
@@ -601,6 +655,87 @@ noinline void flush_cache_all(void)
 
 }
 
+#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING
+
+void flush_cache_mm(struct mm_struct *mm)
+{
+       flush_cache_all();
+}
+
+void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
+                     unsigned long pfn)
+{
+       unsigned int paddr = pfn << PAGE_SHIFT;
+
+       __sync_icache_dcache(paddr, u_vaddr, PAGE_SIZE);
+}
+
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+                      unsigned long end)
+{
+       flush_cache_all();
+}
+
+void copy_user_highpage(struct page *to, struct page *from,
+       unsigned long u_vaddr, struct vm_area_struct *vma)
+{
+       void *kfrom = page_address(from);
+       void *kto = page_address(to);
+       int clean_src_k_mappings = 0;
+
+       /*
+        * If SRC page was already mapped in userspace AND its U-mapping is
+        * not congruent with K-mapping, sync former to physical page so that
+        * K-mapping in memcpy below sees the right data
+        *
+        * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
+        * equally valid for SRC page as well
+        */
+       if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
+               __flush_dcache_page(kfrom, u_vaddr);
+               clean_src_k_mappings = 1;
+       }
+
+       copy_page(kto, kfrom);
+
+       /*
+        * Mark DST page K-mapping as dirty for a later finalization by
+        * update_mmu_cache(). Although the finalization could have been done
+        * here as well (given that both vaddr/paddr are available).
+        * But update_mmu_cache() already has code to do that for other
+        * non copied user pages (e.g. read faults which wire in pagecache page
+        * directly).
+        */
+       set_bit(PG_arch_1, &to->flags);
+
+       /*
+        * if SRC was already usermapped and non-congruent to kernel mapping
+        * sync the kernel mapping back to physical page
+        */
+       if (clean_src_k_mappings) {
+               __flush_dcache_page(kfrom, kfrom);
+       } else {
+               set_bit(PG_arch_1, &from->flags);
+       }
+}
+
+void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
+{
+       clear_page(to);
+       set_bit(PG_arch_1, &page->flags);
+}
+
+void flush_anon_page(struct vm_area_struct *vma, struct page *page,
+                    unsigned long u_vaddr)
+{
+       /* TBD: do we really need to clear the kernel mapping */
+       __flush_dcache_page(page_address(page), u_vaddr);
+       __flush_dcache_page(page_address(page), page_address(page));
+
+}
+
+#endif
+
 /**********************************************************************
  * Explicit Cache flush request from user space via syscall
  * Needed for JITs which generate code on the fly
diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c
new file mode 100644 (file)
index 0000000..2e06d56
--- /dev/null
@@ -0,0 +1,78 @@
+/*
+ * ARC700 mmap
+ *
+ * (started from arm version - for VIPT alias handling)
+ *
+ * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/sched.h>
+#include <asm/cacheflush.h>
+
+#define COLOUR_ALIGN(addr, pgoff)                      \
+       ((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) +      \
+        (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
+
+/*
+ * Ensure that shared mappings are correctly aligned to
+ * avoid aliasing issues with VIPT caches.
+ * We need to ensure that
+ * a specific page of an object is always mapped at a multiple of
+ * SHMLBA bytes.
+ */
+unsigned long
+arch_get_unmapped_area(struct file *filp, unsigned long addr,
+               unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       int do_align = 0;
+       int aliasing = cache_is_vipt_aliasing();
+       struct vm_unmapped_area_info info;
+
+       /*
+        * We only need to do colour alignment if D cache aliases.
+        */
+       if (aliasing)
+               do_align = filp || (flags & MAP_SHARED);
+
+       /*
+        * We enforce the MAP_FIXED case.
+        */
+       if (flags & MAP_FIXED) {
+               if (aliasing && flags & MAP_SHARED &&
+                   (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
+                       return -EINVAL;
+               return addr;
+       }
+
+       if (len > TASK_SIZE)
+               return -ENOMEM;
+
+       if (addr) {
+               if (do_align)
+                       addr = COLOUR_ALIGN(addr, pgoff);
+               else
+                       addr = PAGE_ALIGN(addr);
+
+               vma = find_vma(mm, addr);
+               if (TASK_SIZE - len >= addr &&
+                   (!vma || addr + len <= vma->vm_start))
+                       return addr;
+       }
+
+       info.flags = 0;
+       info.length = len;
+       info.low_limit = mm->mmap_base;
+       info.high_limit = TASK_SIZE;
+       info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+       info.align_offset = pgoff << PAGE_SHIFT;
+       return vm_unmapped_area(&info);
+}
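
The COLOUR_ALIGN() macro used here first rounds the hint address up to an SHMLBA boundary and then adds the colour implied by the file offset, so every mapping of a given page of an object lands on the same colour. A standalone sketch of the arithmetic, with made-up SHMLBA/PAGE_SHIFT values purely for illustration:

#include <stdio.h>

#define PAGE_SHIFT 12
#define SHMLBA     0x2000UL     /* assumed alias window for this example */

static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
        return ((addr + SHMLBA - 1) & ~(SHMLBA - 1)) +
               ((pgoff << PAGE_SHIFT) & (SHMLBA - 1));
}

int main(void)
{
        /* hint 0x10000, page 3 of the file: already aligned, colour 0x1000 added */
        printf("0x%lx\n", colour_align(0x10000UL, 3));  /* 0x11000 */
        /* hint 0x10001 is rounded up to the next SHMLBA boundary first */
        printf("0x%lx\n", colour_align(0x10001UL, 3));  /* 0x13000 */
        return 0;
}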
index 003d69ac6ffa63935a7bedf1089b61e238ff7a12..066145b5f3488bcaa515769728d0389a00fc7ed7 100644 (file)
@@ -421,25 +421,40 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 /*
  * Called at the end of pagefault, for a userspace mapped page
  *  -pre-install the corresponding TLB entry into MMU
- *  -Finalize the delayed D-cache flush (wback+inv kernel mapping)
+ *  -Finalize the delayed D-cache flush of the kernel mapping of the page,
+ *     deferred by flush_dcache_page()/copy_user_page()
+ *
+ * Note that the flush (when done) involves both WBACK (so the physical page
+ * is in sync) and INV (so no non-congruent aliases remain)
  */
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
                      pte_t *ptep)
 {
        unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
+       unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
 
        create_tlb(vma, vaddr, ptep);
 
-       /* icache doesn't snoop dcache, thus needs to be made coherent here */
-       if (vma->vm_flags & VM_EXEC) {
+       /*
+        * Exec page : Independent of aliasing/page-color considerations,
+        *             since icache doesn't snoop dcache on ARC, any dirty
+        *             K-mapping of a code page needs to be wback+inv so that
+        *             icache fetch by userspace sees code correctly.
+        * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
+        *             so userspace sees the right data.
+        *  (Avoids the flush for Non-exec + congruent mapping case)
+        */
+       if (vma->vm_flags & VM_EXEC || addr_not_cache_congruent(paddr, vaddr)) {
                struct page *page = pfn_to_page(pte_pfn(*ptep));
 
-               /* if page was dcache dirty, flush now */
                int dirty = test_and_clear_bit(PG_arch_1, &page->flags);
                if (dirty) {
-                       unsigned long paddr =  pte_val(*ptep) & PAGE_MASK;
-                       __flush_dcache_page(paddr);
-                       __inv_icache_page(paddr, vaddr);
+                       /* wback + inv dcache lines */
+                       __flush_dcache_page(paddr, paddr);
+
+                       /* invalidate any existing icache lines */
+                       if (vma->vm_flags & VM_EXEC)
+                               __inv_icache_page(paddr, vaddr);
                }
        }
 }
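
To summarise the new flush policy in update_mmu_cache(): a dirty kernel mapping is written back and invalidated when the page is executable or when the kernel and user mappings are not congruent, and the icache is additionally invalidated only for executable pages. A small truth-table sketch of that decision (not ARC code, just the distilled logic):

#include <stdbool.h>
#include <stdio.h>

static void decide(bool exec, bool congruent, bool dirty)
{
        bool flush_d = dirty && (exec || !congruent);   /* wback + inv dcache lines */
        bool inv_i   = flush_d && exec;                 /* invalidate icache lines  */

        printf("exec=%d congruent=%d dirty=%d -> dcache=%d icache=%d\n",
               exec, congruent, dirty, flush_d, inv_i);
}

int main(void)
{
        decide(true,  true,  true);     /* code page: dcache wback+inv, icache inv */
        decide(false, true,  true);     /* data, congruent: nothing to do */
        decide(false, false, true);     /* data, aliased: dcache flush only */
        return 0;
}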
index 4e121272c4e5418b86f1db7845713f904f428593..1d3452100f1ff448cd4ed493e7ddf85fde0d9f35 100644 (file)
@@ -27,10 +27,3 @@ menuconfig ARC_PLAT_TB10X
          Abilis Systems. TB10x is based on the ARC700 CPU architecture.
          Say Y if you are building a kernel for one of the SOCs in this
          series (e.g. TB100 or TB101). If in doubt say N.
-
-if ARC_PLAT_TB10X
-
-config GENERIC_GPIO
-       def_bool y
-
-endif
index 12f22492df4ced64e6268449a80598a2ae5676f2..58125bf008d3e647e5c89e6ba534827f0a0d5629 100644 (file)
@@ -389,7 +389,7 @@ __SYSCALL(364, sys_perf_event_open)
 __SYSCALL(365, compat_sys_recvmmsg)
 __SYSCALL(366, sys_accept4)
 __SYSCALL(367, sys_fanotify_init)
-__SYSCALL(368, compat_sys_fanotify_mark_wrapper)
+__SYSCALL(368, compat_sys_fanotify_mark)
 __SYSCALL(369, sys_prlimit64)
 __SYSCALL(370, sys_name_to_handle_at)
 __SYSCALL(371, compat_sys_open_by_handle_at)
index db01aa978c41e2162ed91714c91e074a361ecdd2..a1b19ed7467cf1c026147acaa2539b1dc6f60817 100644 (file)
@@ -104,13 +104,6 @@ compat_sys_fallocate_wrapper:
        b       sys_fallocate
 ENDPROC(compat_sys_fallocate_wrapper)
 
-compat_sys_fanotify_mark_wrapper:
-       orr     x2, x2, x3, lsl #32
-       mov     w3, w4
-       mov     w4, w5
-       b       sys_fanotify_mark
-ENDPROC(compat_sys_fanotify_mark_wrapper)
-
 #undef __SYSCALL
 #define __SYSCALL(x, y)                .quad   y       // x
 
index 66cf00095b8487210b3187cbf41072bc3d80406b..1fce08632ad763abbe68a640a442cea1e0353a7f 100644 (file)
@@ -141,11 +141,11 @@ archclean:
 
 INSTALL_PATH ?= /tftpboot
 boot := arch/$(ARCH)/boot
-BOOT_TARGETS = vmImage vmImage.bin vmImage.bz2 vmImage.gz vmImage.lzma vmImage.lzo vmImage.xip
+BOOT_TARGETS = uImage uImage.bin uImage.bz2 uImage.gz uImage.lzma uImage.lzo uImage.xip
 PHONY += $(BOOT_TARGETS) install
-KBUILD_IMAGE := $(boot)/vmImage
+KBUILD_IMAGE := $(boot)/uImage
 
-all: vmImage
+all: uImage
 
 $(BOOT_TARGETS): vmlinux
        $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
index f7d27d50d02c83fa35ad4606c11ecbd13a555904..3efaa094fb90e44050ed17f62117a60ca098ae8b 100644 (file)
@@ -6,7 +6,7 @@
 # for more details.
 #
 
-targets := vmImage vmImage.bin vmImage.bz2 vmImage.gz vmImage.lzma vmImage.lzo vmImage.xip
+targets := uImage uImage.bin uImage.bz2 uImage.gz uImage.lzma uImage.lzo uImage.xip
 extra-y += vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.xip
 
 ifeq ($(CONFIG_RAMKERNEL),y)
@@ -39,22 +39,22 @@ quiet_cmd_mk_bin_xip = BIN     $@
 $(obj)/vmlinux.bin.xip: $(obj)/vmlinux.bin FORCE
        $(call if_changed,mk_bin_xip)
 
-$(obj)/vmImage.bin: $(obj)/vmlinux.bin
+$(obj)/uImage.bin: $(obj)/vmlinux.bin
        $(call if_changed,uimage,none)
 
-$(obj)/vmImage.bz2: $(obj)/vmlinux.bin.bz2
+$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2
        $(call if_changed,uimage,bzip2)
 
-$(obj)/vmImage.gz: $(obj)/vmlinux.bin.gz
+$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz
        $(call if_changed,uimage,gzip)
 
-$(obj)/vmImage.lzma: $(obj)/vmlinux.bin.lzma
+$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma
        $(call if_changed,uimage,lzma)
 
-$(obj)/vmImage.lzo: $(obj)/vmlinux.bin.lzo
+$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo
        $(call if_changed,uimage,lzo)
 
-$(obj)/vmImage.xip: $(obj)/vmlinux.bin.xip
+$(obj)/uImage.xip: $(obj)/vmlinux.bin.xip
        $(call if_changed,uimage,none)
 
 suffix-y                      := bin
@@ -64,7 +64,7 @@ suffix-$(CONFIG_KERNEL_LZMA)  := lzma
 suffix-$(CONFIG_KERNEL_LZO)   := lzo
 suffix-$(CONFIG_ROMKERNEL)    := xip
 
-$(obj)/vmImage: $(obj)/vmImage.$(suffix-y)
+$(obj)/uImage: $(obj)/uImage.$(suffix-y)
        @ln -sf $(notdir $<) $@
 
 install:
index c8db653c72d2c05f75046026cf1bc1621599d92e..a107a98e99783e4cccb9d47a87bd40c1f6de010d 100644 (file)
@@ -11,7 +11,9 @@
 
 #ifdef CONFIG_SMP
 
+#include <asm/barrier.h>
 #include <linux/linkage.h>
+#include <linux/types.h>
 
 asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
 asmlinkage int __raw_atomic_update_asm(volatile int *ptr, int value);
index 6a4cfe2d33679692117308c5ab0d0395f0ec2091..a99957ea9e9b504b66b4e31f6660ddf5efc18c8e 100644 (file)
@@ -24,18 +24,27 @@ struct bfin_sd_host {
 #define CMD_INT_E          (1 << 8)    /* Command Interrupt */
 #define CMD_PEND_E         (1 << 9)    /* Command Pending */
 #define CMD_E              (1 << 10)   /* Command Enable */
+#ifdef RSI_BLKSZ
+#define CMD_CRC_CHECK_D    (1 << 11)   /* CRC Check is disabled */
+#define CMD_DATA0_BUSY     (1 << 12)   /* Check for Busy State on the DATA0 pin */
+#endif
 
 /* SDH_PWR_CTL bitmasks */
+#ifndef RSI_BLKSZ
 #define PWR_ON             0x3         /* Power On */
 #define SD_CMD_OD          (1 << 6)    /* Open Drain Output */
 #define ROD_CTL            (1 << 7)    /* Rod Control */
+#endif
 
 /* SDH_CLK_CTL bitmasks */
 #define CLKDIV             0xff        /* MC_CLK Divisor */
 #define CLK_E              (1 << 8)    /* MC_CLK Bus Clock Enable */
 #define PWR_SV_E           (1 << 9)    /* Power Save Enable */
 #define CLKDIV_BYPASS      (1 << 10)   /* Bypass Divisor */
-#define WIDE_BUS           (1 << 11)   /* Wide Bus Mode Enable */
+#define BUS_MODE_MASK      0x1800      /* Bus Mode Mask */
+#define STD_BUS_1          0x000       /* Standard Bus 1 bit mode */
+#define WIDE_BUS_4         0x800       /* Wide Bus 4 bit mode */
+#define BYTE_BUS_8         0x1000      /* Byte Bus 8 bit mode */
 
 /* SDH_RESP_CMD bitmasks */
 #define RESP_CMD           0x3f        /* Response Command */
@@ -45,7 +54,13 @@ struct bfin_sd_host {
 #define DTX_DIR            (1 << 1)    /* Data Transfer Direction */
 #define DTX_MODE           (1 << 2)    /* Data Transfer Mode */
 #define DTX_DMA_E          (1 << 3)    /* Data Transfer DMA Enable */
+#ifndef RSI_BLKSZ
 #define DTX_BLK_LGTH       (0xf << 4)  /* Data Transfer Block Length */
+#else
+
+/* Bit masks for SDH_BLK_SIZE */
+#define DTX_BLK_LGTH       0x1fff      /* Data Transfer Block Length */
+#endif
 
 /* SDH_STATUS bitmasks */
 #define CMD_CRC_FAIL       (1 << 0)    /* CMD CRC Fail */
@@ -114,10 +129,14 @@ struct bfin_sd_host {
 /* SDH_E_STATUS bitmasks */
 #define SDIO_INT_DET       (1 << 1)    /* SDIO Int Detected */
 #define SD_CARD_DET        (1 << 4)    /* SD Card Detect */
+#define SD_CARD_BUSYMODE   (1 << 31)   /* Card is in Busy mode */
+#define SD_CARD_SLPMODE    (1 << 30)   /* Card in Sleep Mode */
+#define SD_CARD_READY      (1 << 17)   /* Card Ready */
 
 /* SDH_E_MASK bitmasks */
 #define SDIO_MSK           (1 << 1)    /* Mask SDIO Int Detected */
-#define SCD_MSK            (1 << 6)    /* Mask Card Detect */
+#define SCD_MSK            (1 << 4)    /* Mask Card Detect */
+#define CARD_READY_MSK     (1 << 16)   /* Mask Card Ready */
 
 /* SDH_CFG bitmasks */
 #define CLKS_EN            (1 << 0)    /* Clocks Enable */
@@ -126,7 +145,15 @@ struct bfin_sd_host {
 #define SD_RST             (1 << 4)    /* SDMMC Reset */
 #define PUP_SDDAT          (1 << 5)    /* Pull-up SD_DAT */
 #define PUP_SDDAT3         (1 << 6)    /* Pull-up SD_DAT3 */
+#ifndef RSI_BLKSZ
 #define PD_SDDAT3          (1 << 7)    /* Pull-down SD_DAT3 */
+#else
+#define PWR_ON             0x600       /* Power On */
+#define SD_CMD_OD          (1 << 11)   /* Open Drain Output */
+#define BOOT_EN            (1 << 12)   /* Boot Enable */
+#define BOOT_MODE          (1 << 13)   /* Alternate Boot Mode */
+#define BOOT_ACK_EN        (1 << 14)   /* Boot ACK is expected */
+#endif
 
 /* SDH_RD_WAIT_EN bitmasks */
 #define RWR                (1 << 0)    /* Read Wait Request */
index 8a0fed16058f217cfb6badb41dc176faa40b70af..0ca40dd44724200fc0d098a579e1c9f1b50d6c57 100644 (file)
@@ -41,6 +41,7 @@
 #include <asm-generic/bitops/non-atomic.h>
 #else
 
+#include <asm/barrier.h>
 #include <asm/byteorder.h>     /* swab32 */
 #include <linux/linkage.h>
 
index fe0ca03a1cb2dc2552d0c564ec8ca03a17991625..ca67145c6a45987600407d44f80919b0e718b25e 100644 (file)
@@ -622,10 +622,12 @@ do { \
 #define PAGE_SIZE_4KB      0x00010000  /* 4 KB page size */
 #define PAGE_SIZE_1MB      0x00020000  /* 1 MB page size */
 #define PAGE_SIZE_4MB      0x00030000  /* 4 MB page size */
+#ifdef CONFIG_BF60x
 #define PAGE_SIZE_16KB     0x00040000  /* 16 KB page size */
 #define PAGE_SIZE_64KB     0x00050000  /* 64 KB page size */
 #define PAGE_SIZE_16MB     0x00060000  /* 16 MB page size */
 #define PAGE_SIZE_64MB     0x00070000  /* 64 MB page size */
+#endif
 #define CPLB_L1SRAM        0x00000020  /* 1=SRAM mapped in L1, 0=SRAM not
                                         * mapped to L1
                                         */
index 9b33e7247864c2a14f236fac89ba30e40961c028..c865b33eeb68bc271c4a8e9f14e72d75b5c3edef 100644 (file)
 struct ddr_config {
        u32 ddr_clk;
        u32 dmc_ddrctl;
+       u32 dmc_effctl;
        u32 dmc_ddrcfg;
        u32 dmc_ddrtr0;
        u32 dmc_ddrtr1;
@@ -348,6 +349,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1")))
        [0] = {
                .ddr_clk    = 125,
                .dmc_ddrctl = 0x00000904,
+               .dmc_effctl = 0x004400C0,
                .dmc_ddrcfg = 0x00000422,
                .dmc_ddrtr0 = 0x20705212,
                .dmc_ddrtr1 = 0x201003CF,
@@ -358,6 +360,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1")))
        [1] = {
                .ddr_clk    = 133,
                .dmc_ddrctl = 0x00000904,
+               .dmc_effctl = 0x004400C0,
                .dmc_ddrcfg = 0x00000422,
                .dmc_ddrtr0 = 0x20806313,
                .dmc_ddrtr1 = 0x2013040D,
@@ -368,6 +371,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1")))
        [2] = {
                .ddr_clk    = 150,
                .dmc_ddrctl = 0x00000904,
+               .dmc_effctl = 0x004400C0,
                .dmc_ddrcfg = 0x00000422,
                .dmc_ddrtr0 = 0x20A07323,
                .dmc_ddrtr1 = 0x20160492,
@@ -378,6 +382,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1")))
        [3] = {
                .ddr_clk    = 166,
                .dmc_ddrctl = 0x00000904,
+               .dmc_effctl = 0x004400C0,
                .dmc_ddrcfg = 0x00000422,
                .dmc_ddrtr0 = 0x20A07323,
                .dmc_ddrtr1 = 0x2016050E,
@@ -388,6 +393,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1")))
        [4] = {
                .ddr_clk    = 200,
                .dmc_ddrctl = 0x00000904,
+               .dmc_effctl = 0x004400C0,
                .dmc_ddrcfg = 0x00000422,
                .dmc_ddrtr0 = 0x20a07323,
                .dmc_ddrtr1 = 0x2016050f,
@@ -398,6 +404,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1")))
        [5] = {
                .ddr_clk    = 225,
                .dmc_ddrctl = 0x00000904,
+               .dmc_effctl = 0x004400C0,
                .dmc_ddrcfg = 0x00000422,
                .dmc_ddrtr0 = 0x20E0A424,
                .dmc_ddrtr1 = 0x302006DB,
@@ -408,6 +415,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1")))
        [6] = {
                .ddr_clk    = 250,
                .dmc_ddrctl = 0x00000904,
+               .dmc_effctl = 0x004400C0,
                .dmc_ddrcfg = 0x00000422,
                .dmc_ddrtr0 = 0x20E0A424,
                .dmc_ddrtr1 = 0x3020079E,
@@ -469,6 +477,7 @@ static inline void init_dmc(u32 dmc_clk)
                        bfin_write_DMC0_TR2(ddr_config_table[i].dmc_ddrtr2);
                        bfin_write_DMC0_MR(ddr_config_table[i].dmc_ddrmr);
                        bfin_write_DMC0_EMR1(ddr_config_table[i].dmc_ddrmr1);
+                       bfin_write_DMC0_EFFCTL(ddr_config_table[i].dmc_effctl);
                        bfin_write_DMC0_CTL(ddr_config_table[i].dmc_ddrctl);
                        break;
                }
index 34e96ce02aa9671701197e7bc3b8f65902fbb5d5..b49a53b583d582acbf8670776aa87c6559d0c0e7 100644 (file)
@@ -30,6 +30,7 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
 {
        int i_d, i_i;
        unsigned long addr;
+       unsigned long cplb_pageflags, cplb_pagesize;
 
        struct cplb_entry *d_tbl = dcplb_tbl[cpu];
        struct cplb_entry *i_tbl = icplb_tbl[cpu];
@@ -49,11 +50,20 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
        /* Cover kernel memory with 4M pages.  */
        addr = 0;
 
-       for (; addr < memory_start; addr += 4 * 1024 * 1024) {
+#ifdef PAGE_SIZE_16MB
+       cplb_pageflags = PAGE_SIZE_16MB;
+       cplb_pagesize = SIZE_16M;
+#else
+       cplb_pageflags = PAGE_SIZE_4MB;
+       cplb_pagesize = SIZE_4M;
+#endif
+
+
+       for (; addr < memory_start; addr += cplb_pagesize) {
                d_tbl[i_d].addr = addr;
-               d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB;
+               d_tbl[i_d++].data = SDRAM_DGENERIC | cplb_pageflags;
                i_tbl[i_i].addr = addr;
-               i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB;
+               i_tbl[i_i++].data = SDRAM_IGENERIC | cplb_pageflags;
        }
 
 #ifdef CONFIG_ROMKERNEL
index e854f9066cbde005413bf0659e59fbed6a3b0423..79cc0f6dcdd5c26fa53137914ccc1bed0fdfd5ab 100644 (file)
@@ -145,7 +145,7 @@ MGR_ATTR static int dcplb_miss(int cpu)
        unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
        int status = bfin_read_DCPLB_STATUS();
        int idx;
-       unsigned long d_data, base, addr1, eaddr;
+       unsigned long d_data, base, addr1, eaddr, cplb_pagesize, cplb_pageflags;
 
        nr_dcplb_miss[cpu]++;
        if (unlikely(status & FAULT_USERSUPV))
@@ -167,18 +167,37 @@ MGR_ATTR static int dcplb_miss(int cpu)
        if (unlikely(d_data == 0))
                return CPLB_NO_ADDR_MATCH;
 
-       addr1 = addr & ~(SIZE_4M - 1);
        addr &= ~(SIZE_1M - 1);
        d_data |= PAGE_SIZE_1MB;
-       if (addr1 >= base && (addr1 + SIZE_4M) <= eaddr) {
+
+       /* BF60x supports CPLB page sizes larger than 4M */
+#ifdef PAGE_SIZE_16MB
+       cplb_pageflags = PAGE_SIZE_16MB;
+       cplb_pagesize = SIZE_16M;
+#else
+       cplb_pageflags = PAGE_SIZE_4MB;
+       cplb_pagesize = SIZE_4M;
+#endif
+
+find_pagesize:
+       addr1 = addr & ~(cplb_pagesize - 1);
+       if (addr1 >= base && (addr1 + cplb_pagesize) <= eaddr) {
                /*
                 * This works because
                 * (PAGE_SIZE_4MB & PAGE_SIZE_1MB) == PAGE_SIZE_1MB.
                 */
-               d_data |= PAGE_SIZE_4MB;
+               d_data |= cplb_pageflags;
                addr = addr1;
+               goto found_pagesize;
+       } else {
+               if (cplb_pagesize > SIZE_4M) {
+                       cplb_pageflags = PAGE_SIZE_4MB;
+                       cplb_pagesize = SIZE_4M;
+                       goto find_pagesize;
+               }
        }
 
+found_pagesize:
 #ifdef CONFIG_BF60x
        if ((addr >= ASYNC_BANK0_BASE)
                && (addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE))
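
The reworked dcplb_miss() tries the largest supported CPLB page first and drops back to 4 MB when the aligned region would spill outside the memory bank; if neither fits, the 1 MB mapping set up earlier is kept. A standalone sketch of just that selection, with the bank bounds passed in as hypothetical parameters:

#include <stdio.h>

#define SIZE_4M   (4UL  << 20)
#define SIZE_16M  (16UL << 20)

/* Returns the chosen CPLB page size, or 0 if only the 1 MB default fits. */
static unsigned long pick_cplb_size(unsigned long addr,
                                    unsigned long base, unsigned long eaddr)
{
        unsigned long size = SIZE_16M;

        for (;;) {
                unsigned long start = addr & ~(size - 1);

                if (start >= base && start + size <= eaddr)
                        return size;
                if (size == SIZE_4M)
                        return 0;
                size = SIZE_4M;
        }
}

int main(void)
{
        /* 64 MB bank at 0: a 16 MB page around the fault address fits */
        printf("%lu MB\n", pick_cplb_size(0x01234000UL, 0x0UL, 64UL << 20) >> 20);
        /* 8 MB bank: fall back to a 4 MB page */
        printf("%lu MB\n", pick_cplb_size(0x00534000UL, 0x0UL, 8UL << 20) >> 20);
        return 0;
}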
index 404045dcc5e4bc321c30ddf0eecc1a259e5312ce..5b80d59e66e57b11a3592378abc6914adc938f28 100644 (file)
 #include <asm/cplbinit.h>
 #include <asm/blackfin.h>
 
-static char const page_strtbl[][3] = { "1K", "4K", "1M", "4M" };
-#define page(flags)    (((flags) & 0x30000) >> 16)
+static char const page_strtbl[][4] = {
+       "1K", "4K", "1M", "4M",
+#ifdef CONFIG_BF60x
+       "16K", "64K", "16M", "64M",
+#endif
+};
+#define page(flags)    (((flags) & 0x70000) >> 16)
 #define strpage(flags) page_strtbl[page(flags)]
 
 struct cplbinfo_data {
index fb96e607adcf815890de6ce475c55a3cfd76a42b..107b306b06f10819fe1fc6c3d1e3e87cdaa3afc6 100644 (file)
@@ -1314,7 +1314,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                        seq_printf(m, "(Compiled for Rev %d)", bfin_compiled_revid());
        }
 
-       seq_printf(m, "\ncpu MHz\t\t: %lu.%03lu/%lu.%03lu\n",
+       seq_printf(m, "\ncpu MHz\t\t: %lu.%06lu/%lu.%06lu\n",
                cclk/1000000, cclk%1000000,
                sclk/1000000, sclk%1000000);
        seq_printf(m, "bogomips\t: %lu.%02lu\n"
index 95114ed395ac6939d6a82551c289de52fa943edf..6a3a14bcd3a1ee338be3c90d68f3672575904d39 100644 (file)
@@ -455,6 +455,7 @@ static struct platform_device bfin_async_nand_device = {
 static void bfin_plat_nand_init(void)
 {
        gpio_request(BFIN_NAND_PLAT_READY, "bfin_nand_plat");
+       gpio_direction_input(BFIN_NAND_PLAT_READY);
 }
 #else
 static void bfin_plat_nand_init(void) {}
index a4fce0370c1db2455dfeb16f3526b993333bab38..755f0dc120100a7f359d1137059c3f094cb10d75 100644 (file)
@@ -764,7 +764,6 @@ static struct platform_device i2c_bfin_twi1_device = {
        .num_resources = ARRAY_SIZE(bfin_twi1_resource),
        .resource = bfin_twi1_resource,
 };
-#endif /* CONFIG_BF542 */
 #endif /* CONFIG_I2C_BLACKFIN_TWI */
 
 #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
index 4954cf3f7e16d82ec9dba7b5bc1841362e465c94..102ee4025ac9fb373fd7192cb08fb564b660d4d1 100644 (file)
 #define bfin_write_DMC0_EMR1(val) bfin_write32(DMC0_EMR1, val)
 #define bfin_read_DMC0_CTL() bfin_read32(DMC0_CTL)
 #define bfin_write_DMC0_CTL(val) bfin_write32(DMC0_CTL, val)
+#define bfin_read_DMC0_EFFCTL() bfin_read32(DMC0_EFFCTL)
+#define bfin_write_DMC0_EFFCTL(val) bfin_write32(DMC0_EFFCTL, val)
 #define bfin_read_DMC0_STAT() bfin_read32(DMC0_STAT)
 #define bfin_write_DMC0_STAT(val) bfin_write32(DMC0_STAT, val)
 #define bfin_read_DMC0_DLLCTL() bfin_read32(DMC0_DLLCTL)
index d266787725b468ab94286aeb3e7cca62698c5d6c..33013dfcd3e1d58fb0b0f3495ce1956c76233340 100644 (file)
@@ -223,13 +223,25 @@ config M5307
        help
          Motorola ColdFire 5307 processor support.
 
+config M53xx
+       bool
+
 config M532x
        bool "MCF532x"
        depends on !MMU
+       select M53xx
        select HAVE_CACHE_CB
        help
          Freescale (Motorola) ColdFire 532x processor support.
 
+config M537x
+       bool "MCF537x"
+       depends on !MMU
+       select M53xx
+       select HAVE_CACHE_CB
+       help
+         Freescale ColdFire 537x processor support.
+
 config M5407
        bool "MCF5407"
        depends on !MMU
index 7240584d343974847fbaab832bcf50388d8d2ba3..b9ab0a69561cac3de87657e06700f8c0cf456413 100644 (file)
@@ -358,6 +358,13 @@ config COBRA5329
        help
          Support for the senTec COBRA5329 board.
 
+config M5373EVB
+       bool "Freescale M5373EVB board support"
+       depends on M537x
+       select FREESCALE
+       help
+         Support for the Freescale M5373EVB board.
+
 config M5407C3
        bool "Motorola M5407C3 board support"
        depends on M5407
@@ -539,15 +546,6 @@ config ROMVEC
          68000 type variants the vectors are at the base of the boot device
          on system startup.
 
-config ROMVECSIZE
-       hex "Size of ROM vector region (in bytes)"
-       default "0x400"
-       depends on ROM
-       help
-         Define the size of the vector region in ROM. For most 68000
-         variants this would be 0x400 bytes in size. Set to 0 if you do
-         not want a vector region at the start of the ROM.
-
 config ROMSTART
        hex "Address of the base of system image in ROM"
        default "0x400"
index 2f02acfb8edf4647e28ada0e48801b68c0c965f3..7f7830f2c5bcb488745c0326e36a1889a733e04b 100644 (file)
@@ -45,6 +45,7 @@ cpuflags-$(CONFIG_M5441x)     := $(call cc-option,-mcpu=54455,-mcfv4e)
 cpuflags-$(CONFIG_M54xx)       := $(call cc-option,-mcpu=5475,-m5200)
 cpuflags-$(CONFIG_M5407)       := $(call cc-option,-mcpu=5407,-m5200)
 cpuflags-$(CONFIG_M532x)       := $(call cc-option,-mcpu=532x,-m5307)
+cpuflags-$(CONFIG_M537x)       := $(call cc-option,-mcpu=537x,-m5307)
 cpuflags-$(CONFIG_M5307)       := $(call cc-option,-mcpu=5307,-m5200)
 cpuflags-$(CONFIG_M528x)       := $(call cc-option,-mcpu=528x,-m5307)
 cpuflags-$(CONFIG_M5275)       := $(call cc-option,-mcpu=5275,-m5307)
index a73998528d26dddbc8db3b10979a5156d8a25dca..66a36bd51aa12d120d1502610e0885411a3058a2 100644 (file)
@@ -480,23 +480,6 @@ typedef struct scc_enet {
 #define SICR_ENET_CLKRT        ((uint)0x0000003d)
 #endif
 
-#ifdef CONFIG_RPXLITE
-/* This ENET stuff is for the MPC850 with ethernet on SCC2.  Some of
- * this may be unique to the RPX-Lite configuration.
- * Note TENA is on Port B.
- */
-#define PA_ENET_RXD    ((ushort)0x0004)
-#define PA_ENET_TXD    ((ushort)0x0008)
-#define PA_ENET_TCLK   ((ushort)0x0200)
-#define PA_ENET_RCLK   ((ushort)0x0800)
-#define PB_ENET_TENA   ((uint)0x00002000)
-#define PC_ENET_CLSN   ((ushort)0x0040)
-#define PC_ENET_RENA   ((ushort)0x0080)
-
-#define SICR_ENET_MASK ((uint)0x0000ff00)
-#define SICR_ENET_CLKRT        ((uint)0x00003d00)
-#endif
-
 #ifdef CONFIG_BSEIP
 /* This ENET stuff is for the MPC823 with ethernet on SCC2.
  * This is unique to the BSE ip-Engine board.
diff --git a/arch/m68k/include/asm/dbg.h b/arch/m68k/include/asm/dbg.h
deleted file mode 100644 (file)
index 27af327..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#define DEBUG 1
-#ifdef CONFIG_COLDFIRE
-#define        BREAK asm volatile ("halt")
-#else
-#define BREAK *(volatile unsigned char *)0xdeadbee0 = 0
-#endif
index 0ff3fc6a6d9aaafda260cb0d39a70b431e6aa422..429fe26e320c9813afdf88011195552498a90032 100644 (file)
@@ -39,7 +39,7 @@
 #define MAX_M68K_DMA_CHANNELS 4
 #elif defined(CONFIG_M5272)
 #define MAX_M68K_DMA_CHANNELS 1
-#elif defined(CONFIG_M532x)
+#elif defined(CONFIG_M53xx)
 #define MAX_M68K_DMA_CHANNELS 0
 #else
 #define MAX_M68K_DMA_CHANNELS 2
diff --git a/arch/m68k/include/asm/m532xsim.h b/arch/m68k/include/asm/m532xsim.h
deleted file mode 100644 (file)
index 8668e47..0000000
+++ /dev/null
@@ -1,1241 +0,0 @@
-/****************************************************************************/
-
-/*
- *     m532xsim.h -- ColdFire 5329 registers
- */
-
-/****************************************************************************/
-#ifndef        m532xsim_h
-#define        m532xsim_h
-/****************************************************************************/
-
-#define        CPU_NAME                "COLDFIRE(m532x)"
-#define        CPU_INSTR_PER_JIFFY     3
-#define        MCF_BUSCLK              (MCF_CLK / 3)
-
-#include <asm/m53xxacr.h>
-
-#define MCFINT_VECBASE      64
-#define MCFINT_UART0        26          /* Interrupt number for UART0 */
-#define MCFINT_UART1        27          /* Interrupt number for UART1 */
-#define MCFINT_UART2        28          /* Interrupt number for UART2 */
-#define MCFINT_QSPI         31          /* Interrupt number for QSPI */
-#define MCFINT_FECRX0      36          /* Interrupt number for FEC */
-#define MCFINT_FECTX0      40          /* Interrupt number for FEC */
-#define MCFINT_FECENTC0            42          /* Interrupt number for FEC */
-
-#define MCF_IRQ_UART0       (MCFINT_VECBASE + MCFINT_UART0)
-#define MCF_IRQ_UART1       (MCFINT_VECBASE + MCFINT_UART1)
-#define MCF_IRQ_UART2       (MCFINT_VECBASE + MCFINT_UART2)
-
-#define MCF_IRQ_FECRX0     (MCFINT_VECBASE + MCFINT_FECRX0)
-#define MCF_IRQ_FECTX0     (MCFINT_VECBASE + MCFINT_FECTX0)
-#define MCF_IRQ_FECENTC0    (MCFINT_VECBASE + MCFINT_FECENTC0)
-
-#define        MCF_IRQ_QSPI        (MCFINT_VECBASE + MCFINT_QSPI)
-
-#define MCF_WTM_WCR            0xFC098000
-
-/*
- *     Define the 532x SIM register set addresses.
- */
-#define        MCFSIM_IPRL             0xFC048004
-#define        MCFSIM_IPRH             0xFC048000
-#define        MCFSIM_IPR              MCFSIM_IPRL
-#define        MCFSIM_IMRL             0xFC04800C
-#define        MCFSIM_IMRH             0xFC048008
-#define        MCFSIM_IMR              MCFSIM_IMRL
-#define        MCFSIM_ICR0             0xFC048040      
-#define        MCFSIM_ICR1             0xFC048041      
-#define        MCFSIM_ICR2             0xFC048042      
-#define        MCFSIM_ICR3             0xFC048043      
-#define        MCFSIM_ICR4             0xFC048044      
-#define        MCFSIM_ICR5             0xFC048045      
-#define        MCFSIM_ICR6             0xFC048046      
-#define        MCFSIM_ICR7             0xFC048047      
-#define        MCFSIM_ICR8             0xFC048048      
-#define        MCFSIM_ICR9             0xFC048049      
-#define        MCFSIM_ICR10            0xFC04804A
-#define        MCFSIM_ICR11            0xFC04804B
-
-/*
- *     Some symbol defines for the above...
- */
-#define        MCFSIM_SWDICR           MCFSIM_ICR0     /* Watchdog timer ICR */
-#define        MCFSIM_TIMER1ICR        MCFSIM_ICR1     /* Timer 1 ICR */
-#define        MCFSIM_TIMER2ICR        MCFSIM_ICR2     /* Timer 2 ICR */
-#define        MCFSIM_UART1ICR         MCFSIM_ICR4     /* UART 1 ICR */
-#define        MCFSIM_UART2ICR         MCFSIM_ICR5     /* UART 2 ICR */
-#define        MCFSIM_DMA0ICR          MCFSIM_ICR6     /* DMA 0 ICR */
-#define        MCFSIM_DMA1ICR          MCFSIM_ICR7     /* DMA 1 ICR */
-#define        MCFSIM_DMA2ICR          MCFSIM_ICR8     /* DMA 2 ICR */
-#define        MCFSIM_DMA3ICR          MCFSIM_ICR9     /* DMA 3 ICR */
-
-
-#define        MCFINTC0_SIMR           0xFC04801C
-#define        MCFINTC0_CIMR           0xFC04801D
-#define        MCFINTC0_ICR0           0xFC048040
-#define        MCFINTC1_SIMR           0xFC04C01C
-#define        MCFINTC1_CIMR           0xFC04C01D
-#define        MCFINTC1_ICR0           0xFC04C040
-#define MCFINTC2_SIMR          (0)
-#define MCFINTC2_CIMR          (0)
-#define MCFINTC2_ICR0          (0)
-
-#define MCFSIM_ICR_TIMER1      (0xFC048040+32)
-#define MCFSIM_ICR_TIMER2      (0xFC048040+33)
-
-/*
- *     Define system peripheral IRQ usage.
- */
-#define        MCF_IRQ_TIMER           (64 + 32)       /* Timer0 */
-#define        MCF_IRQ_PROFILER        (64 + 33)       /* Timer1 */
-
-/*
- *  UART module.
- */
-#define MCFUART_BASE0          0xFC060000      /* Base address of UART1 */
-#define MCFUART_BASE1          0xFC064000      /* Base address of UART2 */
-#define MCFUART_BASE2          0xFC068000      /* Base address of UART3 */
-
-/*
- *  FEC module.
- */
-#define        MCFFEC_BASE0            0xFC030000      /* Base address of FEC0 */
-#define        MCFFEC_SIZE0            0x800           /* Size of FEC0 region */
-
-/*
- *  QSPI module.
- */
-#define        MCFQSPI_BASE            0xFC058000      /* Base address of QSPI */
-#define        MCFQSPI_SIZE            0x40            /* Size of QSPI region */
-
-#define        MCFQSPI_CS0             84
-#define        MCFQSPI_CS1             85
-#define        MCFQSPI_CS2             86
-
-/*
- *  Timer module.
- */
-#define MCFTIMER_BASE1         0xFC070000      /* Base address of TIMER1 */
-#define MCFTIMER_BASE2         0xFC074000      /* Base address of TIMER2 */
-#define MCFTIMER_BASE3         0xFC078000      /* Base address of TIMER3 */
-#define MCFTIMER_BASE4         0xFC07C000      /* Base address of TIMER4 */
-
-/*********************************************************************
- *
- * Reset Controller Module
- *
- *********************************************************************/
-
-#define        MCF_RCR                 0xFC0A0000
-#define        MCF_RSR                 0xFC0A0001
-
-#define        MCF_RCR_SWRESET         0x80            /* Software reset bit */
-#define        MCF_RCR_FRCSTOUT        0x40            /* Force external reset */
-
-
-/*
- * Power Management
- */
-#define MCFPM_WCR              0xfc040013
-#define MCFPM_PPMSR0           0xfc04002c
-#define MCFPM_PPMCR0           0xfc04002d
-#define MCFPM_PPMSR1           0xfc04002e
-#define MCFPM_PPMCR1           0xfc04002f
-#define MCFPM_PPMHR0           0xfc040030
-#define MCFPM_PPMLR0           0xfc040034
-#define MCFPM_PPMHR1           0xfc040038
-#define MCFPM_LPCR             0xec090007
-
-/*
- *     The M5329EVB board needs a help getting its devices initialized 
- *     at kernel start time if dBUG doesn't set it up (for example 
- *     it is not used), so we need to do it manually.
- */
-#ifdef __ASSEMBLER__
-.macro m5329EVB_setup
-       movel   #0xFC098000, %a7
-       movel   #0x0, (%a7)
-#define CORE_SRAM      0x80000000      
-#define CORE_SRAM_SIZE 0x8000
-       movel   #CORE_SRAM, %d0
-       addl    #0x221, %d0
-       movec   %d0,%RAMBAR1
-       movel   #CORE_SRAM, %sp
-       addl    #CORE_SRAM_SIZE, %sp
-       jsr     sysinit
-.endm
-#define        PLATFORM_SETUP  m5329EVB_setup
-
-#endif /* __ASSEMBLER__ */
-
-/*********************************************************************
- *
- * Chip Configuration Module (CCM)
- *
- *********************************************************************/
-
-/* Register read/write macros */
-#define MCF_CCM_CCR               0xFC0A0004
-#define MCF_CCM_RCON              0xFC0A0008
-#define MCF_CCM_CIR               0xFC0A000A
-#define MCF_CCM_MISCCR            0xFC0A0010
-#define MCF_CCM_CDR               0xFC0A0012
-#define MCF_CCM_UHCSR             0xFC0A0014
-#define MCF_CCM_UOCSR             0xFC0A0016
-
-/* Bit definitions and macros for MCF_CCM_CCR */
-#define MCF_CCM_CCR_RESERVED      (0x0001)
-#define MCF_CCM_CCR_PLL_MODE      (0x0003)
-#define MCF_CCM_CCR_OSC_MODE      (0x0005)
-#define MCF_CCM_CCR_BOOTPS(x)     (((x)&0x0003)<<3|0x0001)
-#define MCF_CCM_CCR_LOAD          (0x0021)
-#define MCF_CCM_CCR_LIMP          (0x0041)
-#define MCF_CCM_CCR_CSC(x)        (((x)&0x0003)<<8|0x0001)
-
-/* Bit definitions and macros for MCF_CCM_RCON */
-#define MCF_CCM_RCON_RESERVED     (0x0001)
-#define MCF_CCM_RCON_PLL_MODE     (0x0003)
-#define MCF_CCM_RCON_OSC_MODE     (0x0005)
-#define MCF_CCM_RCON_BOOTPS(x)    (((x)&0x0003)<<3|0x0001)
-#define MCF_CCM_RCON_LOAD         (0x0021)
-#define MCF_CCM_RCON_LIMP         (0x0041)
-#define MCF_CCM_RCON_CSC(x)       (((x)&0x0003)<<8|0x0001)
-
-/* Bit definitions and macros for MCF_CCM_CIR */
-#define MCF_CCM_CIR_PRN(x)        (((x)&0x003F)<<0)
-#define MCF_CCM_CIR_PIN(x)        (((x)&0x03FF)<<6)
-
-/* Bit definitions and macros for MCF_CCM_MISCCR */
-#define MCF_CCM_MISCCR_USBSRC     (0x0001)
-#define MCF_CCM_MISCCR_USBDIV     (0x0002)
-#define MCF_CCM_MISCCR_SSI_SRC    (0x0010)
-#define MCF_CCM_MISCCR_TIM_DMA   (0x0020)
-#define MCF_CCM_MISCCR_SSI_PUS    (0x0040)
-#define MCF_CCM_MISCCR_SSI_PUE    (0x0080)
-#define MCF_CCM_MISCCR_LCD_CHEN   (0x0100)
-#define MCF_CCM_MISCCR_LIMP       (0x1000)
-#define MCF_CCM_MISCCR_PLL_LOCK   (0x2000)
-
-/* Bit definitions and macros for MCF_CCM_CDR */
-#define MCF_CCM_CDR_SSIDIV(x)     (((x)&0x000F)<<0)
-#define MCF_CCM_CDR_LPDIV(x)      (((x)&0x000F)<<8)
-
-/* Bit definitions and macros for MCF_CCM_UHCSR */
-#define MCF_CCM_UHCSR_XPDE        (0x0001)
-#define MCF_CCM_UHCSR_UHMIE       (0x0002)
-#define MCF_CCM_UHCSR_WKUP        (0x0004)
-#define MCF_CCM_UHCSR_PORTIND(x)  (((x)&0x0003)<<14)
-
-/* Bit definitions and macros for MCF_CCM_UOCSR */
-#define MCF_CCM_UOCSR_XPDE        (0x0001)
-#define MCF_CCM_UOCSR_UOMIE       (0x0002)
-#define MCF_CCM_UOCSR_WKUP        (0x0004)
-#define MCF_CCM_UOCSR_PWRFLT      (0x0008)
-#define MCF_CCM_UOCSR_SEND        (0x0010)
-#define MCF_CCM_UOCSR_VVLD        (0x0020)
-#define MCF_CCM_UOCSR_BVLD        (0x0040)
-#define MCF_CCM_UOCSR_AVLD        (0x0080)
-#define MCF_CCM_UOCSR_DPPU        (0x0100)
-#define MCF_CCM_UOCSR_DCR_VBUS    (0x0200)
-#define MCF_CCM_UOCSR_CRG_VBUS    (0x0400)
-#define MCF_CCM_UOCSR_DRV_VBUS    (0x0800)
-#define MCF_CCM_UOCSR_DMPD        (0x1000)
-#define MCF_CCM_UOCSR_DPPD        (0x2000)
-#define MCF_CCM_UOCSR_PORTIND(x)  (((x)&0x0003)<<14)
-
-/*********************************************************************
- *
- * FlexBus Chip Selects (FBCS)
- *
- *********************************************************************/
-
-/* Register read/write macros */
-#define MCF_FBCS0_CSAR         0xFC008000
-#define MCF_FBCS0_CSMR         0xFC008004
-#define MCF_FBCS0_CSCR         0xFC008008
-#define MCF_FBCS1_CSAR         0xFC00800C
-#define MCF_FBCS1_CSMR         0xFC008010
-#define MCF_FBCS1_CSCR         0xFC008014
-#define MCF_FBCS2_CSAR         0xFC008018
-#define MCF_FBCS2_CSMR         0xFC00801C
-#define MCF_FBCS2_CSCR         0xFC008020
-#define MCF_FBCS3_CSAR         0xFC008024
-#define MCF_FBCS3_CSMR         0xFC008028
-#define MCF_FBCS3_CSCR         0xFC00802C
-#define MCF_FBCS4_CSAR         0xFC008030
-#define MCF_FBCS4_CSMR         0xFC008034
-#define MCF_FBCS4_CSCR         0xFC008038
-#define MCF_FBCS5_CSAR         0xFC00803C
-#define MCF_FBCS5_CSMR         0xFC008040
-#define MCF_FBCS5_CSCR         0xFC008044
-
-/* Bit definitions and macros for MCF_FBCS_CSAR */
-#define MCF_FBCS_CSAR_BA(x)    ((x)&0xFFFF0000)
-
-/* Bit definitions and macros for MCF_FBCS_CSMR */
-#define MCF_FBCS_CSMR_V                (0x00000001)
-#define MCF_FBCS_CSMR_WP       (0x00000100)
-#define MCF_FBCS_CSMR_BAM(x)   (((x)&0x0000FFFF)<<16)
-#define MCF_FBCS_CSMR_BAM_4G   (0xFFFF0000)
-#define MCF_FBCS_CSMR_BAM_2G   (0x7FFF0000)
-#define MCF_FBCS_CSMR_BAM_1G   (0x3FFF0000)
-#define MCF_FBCS_CSMR_BAM_1024M        (0x3FFF0000)
-#define MCF_FBCS_CSMR_BAM_512M (0x1FFF0000)
-#define MCF_FBCS_CSMR_BAM_256M (0x0FFF0000)
-#define MCF_FBCS_CSMR_BAM_128M (0x07FF0000)
-#define MCF_FBCS_CSMR_BAM_64M  (0x03FF0000)
-#define MCF_FBCS_CSMR_BAM_32M  (0x01FF0000)
-#define MCF_FBCS_CSMR_BAM_16M  (0x00FF0000)
-#define MCF_FBCS_CSMR_BAM_8M   (0x007F0000)
-#define MCF_FBCS_CSMR_BAM_4M   (0x003F0000)
-#define MCF_FBCS_CSMR_BAM_2M   (0x001F0000)
-#define MCF_FBCS_CSMR_BAM_1M   (0x000F0000)
-#define MCF_FBCS_CSMR_BAM_1024K        (0x000F0000)
-#define MCF_FBCS_CSMR_BAM_512K (0x00070000)
-#define MCF_FBCS_CSMR_BAM_256K (0x00030000)
-#define MCF_FBCS_CSMR_BAM_128K (0x00010000)
-#define MCF_FBCS_CSMR_BAM_64K  (0x00000000)
-
-/* Bit definitions and macros for MCF_FBCS_CSCR */
-#define MCF_FBCS_CSCR_BSTW     (0x00000008)
-#define MCF_FBCS_CSCR_BSTR     (0x00000010)
-#define MCF_FBCS_CSCR_BEM      (0x00000020)
-#define MCF_FBCS_CSCR_PS(x)    (((x)&0x00000003)<<6)
-#define MCF_FBCS_CSCR_AA       (0x00000100)
-#define MCF_FBCS_CSCR_SBM      (0x00000200)
-#define MCF_FBCS_CSCR_WS(x)    (((x)&0x0000003F)<<10)
-#define MCF_FBCS_CSCR_WRAH(x)  (((x)&0x00000003)<<16)
-#define MCF_FBCS_CSCR_RDAH(x)  (((x)&0x00000003)<<18)
-#define MCF_FBCS_CSCR_ASET(x)  (((x)&0x00000003)<<20)
-#define MCF_FBCS_CSCR_SWSEN    (0x00800000)
-#define MCF_FBCS_CSCR_SWS(x)   (((x)&0x0000003F)<<26)
-#define MCF_FBCS_CSCR_PS_8     (0x0040)
-#define MCF_FBCS_CSCR_PS_16    (0x0080)
-#define MCF_FBCS_CSCR_PS_32    (0x0000)
-
-/*********************************************************************
- *
- * General Purpose I/O (GPIO)
- *
- *********************************************************************/
-
-/* Register read/write macros */
-#define MCFGPIO_PODR_FECH              (0xFC0A4000)
-#define MCFGPIO_PODR_FECL              (0xFC0A4001)
-#define MCFGPIO_PODR_SSI               (0xFC0A4002)
-#define MCFGPIO_PODR_BUSCTL            (0xFC0A4003)
-#define MCFGPIO_PODR_BE                        (0xFC0A4004)
-#define MCFGPIO_PODR_CS                        (0xFC0A4005)
-#define MCFGPIO_PODR_PWM               (0xFC0A4006)
-#define MCFGPIO_PODR_FECI2C            (0xFC0A4007)
-#define MCFGPIO_PODR_UART              (0xFC0A4009)
-#define MCFGPIO_PODR_QSPI              (0xFC0A400A)
-#define MCFGPIO_PODR_TIMER             (0xFC0A400B)
-#define MCFGPIO_PODR_LCDDATAH          (0xFC0A400D)
-#define MCFGPIO_PODR_LCDDATAM          (0xFC0A400E)
-#define MCFGPIO_PODR_LCDDATAL          (0xFC0A400F)
-#define MCFGPIO_PODR_LCDCTLH           (0xFC0A4010)
-#define MCFGPIO_PODR_LCDCTLL           (0xFC0A4011)
-#define MCFGPIO_PDDR_FECH              (0xFC0A4014)
-#define MCFGPIO_PDDR_FECL              (0xFC0A4015)
-#define MCFGPIO_PDDR_SSI               (0xFC0A4016)
-#define MCFGPIO_PDDR_BUSCTL            (0xFC0A4017)
-#define MCFGPIO_PDDR_BE                        (0xFC0A4018)
-#define MCFGPIO_PDDR_CS                        (0xFC0A4019)
-#define MCFGPIO_PDDR_PWM               (0xFC0A401A)
-#define MCFGPIO_PDDR_FECI2C            (0xFC0A401B)
-#define MCFGPIO_PDDR_UART              (0xFC0A401C)
-#define MCFGPIO_PDDR_QSPI              (0xFC0A401E)
-#define MCFGPIO_PDDR_TIMER             (0xFC0A401F)
-#define MCFGPIO_PDDR_LCDDATAH          (0xFC0A4021)
-#define MCFGPIO_PDDR_LCDDATAM          (0xFC0A4022)
-#define MCFGPIO_PDDR_LCDDATAL          (0xFC0A4023)
-#define MCFGPIO_PDDR_LCDCTLH           (0xFC0A4024)
-#define MCFGPIO_PDDR_LCDCTLL           (0xFC0A4025)
-#define MCFGPIO_PPDSDR_FECH            (0xFC0A4028)
-#define MCFGPIO_PPDSDR_FECL            (0xFC0A4029)
-#define MCFGPIO_PPDSDR_SSI             (0xFC0A402A)
-#define MCFGPIO_PPDSDR_BUSCTL          (0xFC0A402B)
-#define MCFGPIO_PPDSDR_BE              (0xFC0A402C)
-#define MCFGPIO_PPDSDR_CS              (0xFC0A402D)
-#define MCFGPIO_PPDSDR_PWM             (0xFC0A402E)
-#define MCFGPIO_PPDSDR_FECI2C          (0xFC0A402F)
-#define MCFGPIO_PPDSDR_UART            (0xFC0A4031)
-#define MCFGPIO_PPDSDR_QSPI            (0xFC0A4032)
-#define MCFGPIO_PPDSDR_TIMER           (0xFC0A4033)
-#define MCFGPIO_PPDSDR_LCDDATAH                (0xFC0A4035)
-#define MCFGPIO_PPDSDR_LCDDATAM                (0xFC0A4036)
-#define MCFGPIO_PPDSDR_LCDDATAL                (0xFC0A4037)
-#define MCFGPIO_PPDSDR_LCDCTLH         (0xFC0A4038)
-#define MCFGPIO_PPDSDR_LCDCTLL         (0xFC0A4039)
-#define MCFGPIO_PCLRR_FECH             (0xFC0A403C)
-#define MCFGPIO_PCLRR_FECL             (0xFC0A403D)
-#define MCFGPIO_PCLRR_SSI              (0xFC0A403E)
-#define MCFGPIO_PCLRR_BUSCTL           (0xFC0A403F)
-#define MCFGPIO_PCLRR_BE               (0xFC0A4040)
-#define MCFGPIO_PCLRR_CS               (0xFC0A4041)
-#define MCFGPIO_PCLRR_PWM              (0xFC0A4042)
-#define MCFGPIO_PCLRR_FECI2C           (0xFC0A4043)
-#define MCFGPIO_PCLRR_UART             (0xFC0A4045)
-#define MCFGPIO_PCLRR_QSPI             (0xFC0A4046)
-#define MCFGPIO_PCLRR_TIMER            (0xFC0A4047)
-#define MCFGPIO_PCLRR_LCDDATAH         (0xFC0A4049)
-#define MCFGPIO_PCLRR_LCDDATAM         (0xFC0A404A)
-#define MCFGPIO_PCLRR_LCDDATAL         (0xFC0A404B)
-#define MCFGPIO_PCLRR_LCDCTLH          (0xFC0A404C)
-#define MCFGPIO_PCLRR_LCDCTLL          (0xFC0A404D)
-#define MCFGPIO_PAR_FEC                        (0xFC0A4050)
-#define MCFGPIO_PAR_PWM                        (0xFC0A4051)
-#define MCFGPIO_PAR_BUSCTL             (0xFC0A4052)
-#define MCFGPIO_PAR_FECI2C             (0xFC0A4053)
-#define MCFGPIO_PAR_BE                 (0xFC0A4054)
-#define MCFGPIO_PAR_CS                 (0xFC0A4055)
-#define MCFGPIO_PAR_SSI                        (0xFC0A4056)
-#define MCFGPIO_PAR_UART               (0xFC0A4058)
-#define MCFGPIO_PAR_QSPI               (0xFC0A405A)
-#define MCFGPIO_PAR_TIMER              (0xFC0A405C)
-#define MCFGPIO_PAR_LCDDATA            (0xFC0A405D)
-#define MCFGPIO_PAR_LCDCTL             (0xFC0A405E)
-#define MCFGPIO_PAR_IRQ                        (0xFC0A4060)
-#define MCFGPIO_MSCR_FLEXBUS           (0xFC0A4064)
-#define MCFGPIO_MSCR_SDRAM             (0xFC0A4065)
-#define MCFGPIO_DSCR_I2C               (0xFC0A4068)
-#define MCFGPIO_DSCR_PWM               (0xFC0A4069)
-#define MCFGPIO_DSCR_FEC               (0xFC0A406A)
-#define MCFGPIO_DSCR_UART              (0xFC0A406B)
-#define MCFGPIO_DSCR_QSPI              (0xFC0A406C)
-#define MCFGPIO_DSCR_TIMER             (0xFC0A406D)
-#define MCFGPIO_DSCR_SSI               (0xFC0A406E)
-#define MCFGPIO_DSCR_LCD               (0xFC0A406F)
-#define MCFGPIO_DSCR_DEBUG             (0xFC0A4070)
-#define MCFGPIO_DSCR_CLKRST            (0xFC0A4071)
-#define MCFGPIO_DSCR_IRQ               (0xFC0A4072)
-
-/* Bit definitions and macros for MCF_GPIO_PODR_FECH */
-#define MCF_GPIO_PODR_FECH_PODR_FECH0              (0x01)
-#define MCF_GPIO_PODR_FECH_PODR_FECH1              (0x02)
-#define MCF_GPIO_PODR_FECH_PODR_FECH2              (0x04)
-#define MCF_GPIO_PODR_FECH_PODR_FECH3              (0x08)
-#define MCF_GPIO_PODR_FECH_PODR_FECH4              (0x10)
-#define MCF_GPIO_PODR_FECH_PODR_FECH5              (0x20)
-#define MCF_GPIO_PODR_FECH_PODR_FECH6              (0x40)
-#define MCF_GPIO_PODR_FECH_PODR_FECH7              (0x80)
-
-/* Bit definitions and macros for MCF_GPIO_PODR_FECL */
-#define MCF_GPIO_PODR_FECL_PODR_FECL0              (0x01)
-#define MCF_GPIO_PODR_FECL_PODR_FECL1              (0x02)
-#define MCF_GPIO_PODR_FECL_PODR_FECL2              (0x04)
-#define MCF_GPIO_PODR_FECL_PODR_FECL3              (0x08)
-#define MCF_GPIO_PODR_FECL_PODR_FECL4              (0x10)
-#define MCF_GPIO_PODR_FECL_PODR_FECL5              (0x20)
-#define MCF_GPIO_PODR_FECL_PODR_FECL6              (0x40)
-#define MCF_GPIO_PODR_FECL_PODR_FECL7              (0x80)
-
-/* Bit definitions and macros for MCF_GPIO_PODR_SSI */
-#define MCF_GPIO_PODR_SSI_PODR_SSI0                (0x01)
-#define MCF_GPIO_PODR_SSI_PODR_SSI1                (0x02)
-#define MCF_GPIO_PODR_SSI_PODR_SSI2                (0x04)
-#define MCF_GPIO_PODR_SSI_PODR_SSI3                (0x08)
-#define MCF_GPIO_PODR_SSI_PODR_SSI4                (0x10)
-
-/* Bit definitions and macros for MCF_GPIO_PODR_BUSCTL */
-#define MCF_GPIO_PODR_BUSCTL_POSDR_BUSCTL0         (0x01)
-#define MCF_GPIO_PODR_BUSCTL_PODR_BUSCTL1          (0x02)
-#define MCF_GPIO_PODR_BUSCTL_PODR_BUSCTL2          (0x04)
-#define MCF_GPIO_PODR_BUSCTL_PODR_BUSCTL3          (0x08)
-
-/* Bit definitions and macros for MCF_GPIO_PODR_BE */
-#define MCF_GPIO_PODR_BE_PODR_BE0                  (0x01)
-#define MCF_GPIO_PODR_BE_PODR_BE1                  (0x02)
-#define MCF_GPIO_PODR_BE_PODR_BE2                  (0x04)
-#define MCF_GPIO_PODR_BE_PODR_BE3                  (0x08)
-
-/* Bit definitions and macros for MCF_GPIO_PODR_CS */
-#define MCF_GPIO_PODR_CS_PODR_CS1                  (0x02)
-#define MCF_GPIO_PODR_CS_PODR_CS2                  (0x04)
-#define MCF_GPIO_PODR_CS_PODR_CS3                  (0x08)
-#define MCF_GPIO_PODR_CS_PODR_CS4                  (0x10)
-#define MCF_GPIO_PODR_CS_PODR_CS5                  (0x20)
-
-/* Bit definitions and macros for MCF_GPIO_PODR_PWM */
-#define MCF_GPIO_PODR_PWM_PODR_PWM2                (0x04)
-#define MCF_GPIO_PODR_PWM_PODR_PWM3                (0x08)
-#define MCF_GPIO_PODR_PWM_PODR_PWM4                (0x10)
-#define MCF_GPIO_PODR_PWM_PODR_PWM5                (0x20)
-
-/* Bit definitions and macros for MCF_GPIO_PODR_FECI2C */
-#define MCF_GPIO_PODR_FECI2C_PODR_FECI2C0          (0x01)
-#define MCF_GPIO_PODR_FECI2C_PODR_FECI2C1          (0x02)
-#define MCF_GPIO_PODR_FECI2C_PODR_FECI2C2          (0x04)
-#define MCF_GPIO_PODR_FECI2C_PODR_FECI2C3          (0x08)
-
-/* Bit definitions and macros for MCF_GPIO_PODR_UART */
-#define MCF_GPIO_PODR_UART_PODR_UART0              (0x01)
-#define MCF_GPIO_PODR_UART_PODR_UART1              (0x02)
-#define MCF_GPIO_PODR_UART_PODR_UART2              (0x04)
-#define MCF_GPIO_PODR_UART_PODR_UART3              (0x08)
-#define MCF_GPIO_PODR_UART_PODR_UART4              (0x10)
-#define MCF_GPIO_PODR_UART_PODR_UART5              (0x20)
-#define MCF_GPIO_PODR_UART_PODR_UART6              (0x40)
-#define MCF_GPIO_PODR_UART_PODR_UART7              (0x80)
-
-/* Bit definitions and macros for MCF_GPIO_PODR_QSPI */
-#define MCF_GPIO_PODR_QSPI_PODR_QSPI0              (0x01)
-#define MCF_GPIO_PODR_QSPI_PODR_QSPI1              (0x02)
-#define MCF_GPIO_PODR_QSPI_PODR_QSPI2              (0x04)
-#define MCF_GPIO_PODR_QSPI_PODR_QSPI3              (0x08)
-#define MCF_GPIO_PODR_QSPI_PODR_QSPI4              (0x10)
-#define MCF_GPIO_PODR_QSPI_PODR_QSPI5              (0x20)
-
-/* Bit definitions and macros for MCF_GPIO_PODR_TIMER */
-#define MCF_GPIO_PODR_TIMER_PODR_TIMER0            (0x01)
-#define MCF_GPIO_PODR_TIMER_PODR_TIMER1            (0x02)
-#define MCF_GPIO_PODR_TIMER_PODR_TIMER2            (0x04)
-#define MCF_GPIO_PODR_TIMER_PODR_TIMER3            (0x08)
-
-/* Bit definitions and macros for MCF_GPIO_PODR_LCDDATAH */
-#define MCF_GPIO_PODR_LCDDATAH_PODR_LCDDATAH0      (0x01)
-#define MCF_GPIO_PODR_LCDDATAH_PODR_LCDDATAH1      (0x02)
-
-/* Bit definitions and macros for MCF_GPIO_PODR_LCDDATAM */
-#define MCF_GPIO_PODR_LCDDATAM_PODR_LCDDATAM0      (0x01)
-#define MCF_GPIO_PODR_LCDDATAM_PODR_LCDDATAM1      (0x02)
-#define MCF_GPIO_PODR_LCDDATAM_PODR_LCDDATAM2      (0x04)
-#define MCF_GPIO_PODR_LCDDATAM_PODR_LCDDATAM3      (0x08)
-#define MCF_GPIO_PODR_LCDDATAM_PODR_LCDDATAM4      (0x10)
-#define MCF_GPIO_PODR_LCDDATAM_PODR_LCDDATAM5      (0x20)
-#define MCF_GPIO_PODR_LCDDATAM_PODR_LCDDATAM6      (0x40)
-#define MCF_GPIO_PODR_LCDDATAM_PODR_LCDDATAM7      (0x80)
-
-/* Bit definitions and macros for MCF_GPIO_PODR_LCDDATAL */
-#define MCF_GPIO_PODR_LCDDATAL_PODR_LCDDATAL0      (0x01)
-#define MCF_GPIO_PODR_LCDDATAL_PODR_LCDDATAL1      (0x02)
-#define MCF_GPIO_PODR_LCDDATAL_PODR_LCDDATAL2      (0x04)
-#define MCF_GPIO_PODR_LCDDATAL_PODR_LCDDATAL3      (0x08)
-#define MCF_GPIO_PODR_LCDDATAL_PODR_LCDDATAL4      (0x10)
-#define MCF_GPIO_PODR_LCDDATAL_PODR_LCDDATAL5      (0x20)
-#define MCF_GPIO_PODR_LCDDATAL_PODR_LCDDATAL6      (0x40)
-#define MCF_GPIO_PODR_LCDDATAL_PODR_LCDDATAL7      (0x80)
-
-/* Bit definitions and macros for MCF_GPIO_PODR_LCDCTLH */
-#define MCF_GPIO_PODR_LCDCTLH_PODR_LCDCTLH0        (0x01)
-
-/* Bit definitions and macros for MCF_GPIO_PODR_LCDCTLL */
-#define MCF_GPIO_PODR_LCDCTLL_PODR_LCDCTLL0        (0x01)
-#define MCF_GPIO_PODR_LCDCTLL_PODR_LCDCTLL1        (0x02)
-#define MCF_GPIO_PODR_LCDCTLL_PODR_LCDCTLL2        (0x04)
-#define MCF_GPIO_PODR_LCDCTLL_PODR_LCDCTLL3        (0x08)
-#define MCF_GPIO_PODR_LCDCTLL_PODR_LCDCTLL4        (0x10)
-#define MCF_GPIO_PODR_LCDCTLL_PODR_LCDCTLL5        (0x20)
-#define MCF_GPIO_PODR_LCDCTLL_PODR_LCDCTLL6        (0x40)
-#define MCF_GPIO_PODR_LCDCTLL_PODR_LCDCTLL7        (0x80)
-
-/* Bit definitions and macros for MCF_GPIO_PDDR_FECH */
-#define MCF_GPIO_PDDR_FECH_PDDR_FECH0              (0x01)
-#define MCF_GPIO_PDDR_FECH_PDDR_FECH1              (0x02)
-#define MCF_GPIO_PDDR_FECH_PDDR_FECH2              (0x04)
-#define MCF_GPIO_PDDR_FECH_PDDR_FECH3              (0x08)
-#define MCF_GPIO_PDDR_FECH_PDDR_FECH4              (0x10)
-#define MCF_GPIO_PDDR_FECH_PDDR_FECH5              (0x20)
-#define MCF_GPIO_PDDR_FECH_PDDR_FECH6              (0x40)
-#define MCF_GPIO_PDDR_FECH_PDDR_FECH7              (0x80)
-
-/* Bit definitions and macros for MCF_GPIO_PDDR_FECL */
-#define MCF_GPIO_PDDR_FECL_PDDR_FECL0              (0x01)
-#define MCF_GPIO_PDDR_FECL_PDDR_FECL1              (0x02)
-#define MCF_GPIO_PDDR_FECL_PDDR_FECL2              (0x04)
-#define MCF_GPIO_PDDR_FECL_PDDR_FECL3              (0x08)
-#define MCF_GPIO_PDDR_FECL_PDDR_FECL4              (0x10)
-#define MCF_GPIO_PDDR_FECL_PDDR_FECL5              (0x20)
-#define MCF_GPIO_PDDR_FECL_PDDR_FECL6              (0x40)
-#define MCF_GPIO_PDDR_FECL_PDDR_FECL7              (0x80)
-
-/* Bit definitions and macros for MCF_GPIO_PDDR_SSI */
-#define MCF_GPIO_PDDR_SSI_PDDR_SSI0                (0x01)
-#define MCF_GPIO_PDDR_SSI_PDDR_SSI1                (0x02)
-#define MCF_GPIO_PDDR_SSI_PDDR_SSI2                (0x04)
-#define MCF_GPIO_PDDR_SSI_PDDR_SSI3                (0x08)
-#define MCF_GPIO_PDDR_SSI_PDDR_SSI4                (0x10)
-
-/* Bit definitions and macros for MCF_GPIO_PDDR_BUSCTL */
-#define MCF_GPIO_PDDR_BUSCTL_POSDR_BUSCTL0         (0x01)
-#define MCF_GPIO_PDDR_BUSCTL_PDDR_BUSCTL1          (0x02)
-#define MCF_GPIO_PDDR_BUSCTL_PDDR_BUSCTL2          (0x04)
-#define MCF_GPIO_PDDR_BUSCTL_PDDR_BUSCTL3          (0x08)
-
-/* Bit definitions and macros for MCF_GPIO_PDDR_BE */
-#define MCF_GPIO_PDDR_BE_PDDR_BE0                  (0x01)
-#define MCF_GPIO_PDDR_BE_PDDR_BE1                  (0x02)
-#define MCF_GPIO_PDDR_BE_PDDR_BE2                  (0x04)
-#define MCF_GPIO_PDDR_BE_PDDR_BE3                  (0x08)
-
-/* Bit definitions and macros for MCF_GPIO_PDDR_CS */
-#define MCF_GPIO_PDDR_CS_PDDR_CS1                  (0x02)
-#define MCF_GPIO_PDDR_CS_PDDR_CS2                  (0x04)
-#define MCF_GPIO_PDDR_CS_PDDR_CS3                  (0x08)
-#define MCF_GPIO_PDDR_CS_PDDR_CS4                  (0x10)
-#define MCF_GPIO_PDDR_CS_PDDR_CS5                  (0x20)
-
-/* Bit definitions and macros for MCF_GPIO_PDDR_PWM */
-#define MCF_GPIO_PDDR_PWM_PDDR_PWM2                (0x04)
-#define MCF_GPIO_PDDR_PWM_PDDR_PWM3                (0x08)
-#define MCF_GPIO_PDDR_PWM_PDDR_PWM4                (0x10)
-#define MCF_GPIO_PDDR_PWM_PDDR_PWM5                (0x20)
-
-/* Bit definitions and macros for MCF_GPIO_PDDR_FECI2C */
-#define MCF_GPIO_PDDR_FECI2C_PDDR_FECI2C0          (0x01)
-#define MCF_GPIO_PDDR_FECI2C_PDDR_FECI2C1          (0x02)
-#define MCF_GPIO_PDDR_FECI2C_PDDR_FECI2C2          (0x04)
-#define MCF_GPIO_PDDR_FECI2C_PDDR_FECI2C3          (0x08)
-
-/* Bit definitions and macros for MCF_GPIO_PDDR_UART */
-#define MCF_GPIO_PDDR_UART_PDDR_UART0              (0x01)
-#define MCF_GPIO_PDDR_UART_PDDR_UART1              (0x02)
-#define MCF_GPIO_PDDR_UART_PDDR_UART2              (0x04)
-#define MCF_GPIO_PDDR_UART_PDDR_UART3              (0x08)
-#define MCF_GPIO_PDDR_UART_PDDR_UART4              (0x10)
-#define MCF_GPIO_PDDR_UART_PDDR_UART5              (0x20)
-#define MCF_GPIO_PDDR_UART_PDDR_UART6              (0x40)
-#define MCF_GPIO_PDDR_UART_PDDR_UART7              (0x80)
-
-/* Bit definitions and macros for MCF_GPIO_PDDR_QSPI */
-#define MCF_GPIO_PDDR_QSPI_PDDR_QSPI0              (0x01)
-#define MCF_GPIO_PDDR_QSPI_PDDR_QSPI1              (0x02)
-#define MCF_GPIO_PDDR_QSPI_PDDR_QSPI2              (0x04)
-#define MCF_GPIO_PDDR_QSPI_PDDR_QSPI3              (0x08)
-#define MCF_GPIO_PDDR_QSPI_PDDR_QSPI4              (0x10)
-#define MCF_GPIO_PDDR_QSPI_PDDR_QSPI5              (0x20)
-
-/* Bit definitions and macros for MCF_GPIO_PDDR_TIMER */
-#define MCF_GPIO_PDDR_TIMER_PDDR_TIMER0            (0x01)
-#define MCF_GPIO_PDDR_TIMER_PDDR_TIMER1            (0x02)
-#define MCF_GPIO_PDDR_TIMER_PDDR_TIMER2            (0x04)
-#define MCF_GPIO_PDDR_TIMER_PDDR_TIMER3            (0x08)
-
-/* Bit definitions and macros for MCF_GPIO_PDDR_LCDDATAH */
-#define MCF_GPIO_PDDR_LCDDATAH_PDDR_LCDDATAH0      (0x01)
-#define MCF_GPIO_PDDR_LCDDATAH_PDDR_LCDDATAH1      (0x02)
-
-/* Bit definitions and macros for MCF_GPIO_PDDR_LCDDATAM */
-#define MCF_GPIO_PDDR_LCDDATAM_PDDR_LCDDATAM0      (0x01)
-#define MCF_GPIO_PDDR_LCDDATAM_PDDR_LCDDATAM1      (0x02)
-#define MCF_GPIO_PDDR_LCDDATAM_PDDR_LCDDATAM2      (0x04)
-#define MCF_GPIO_PDDR_LCDDATAM_PDDR_LCDDATAM3      (0x08)
-#define MCF_GPIO_PDDR_LCDDATAM_PDDR_LCDDATAM4      (0x10)
-#define MCF_GPIO_PDDR_LCDDATAM_PDDR_LCDDATAM5      (0x20)
-#define MCF_GPIO_PDDR_LCDDATAM_PDDR_LCDDATAM6      (0x40)
-#define MCF_GPIO_PDDR_LCDDATAM_PDDR_LCDDATAM7      (0x80)
-
-/* Bit definitions and macros for MCF_GPIO_PDDR_LCDDATAL */
-#define MCF_GPIO_PDDR_LCDDATAL_PDDR_LCDDATAL0      (0x01)
-#define MCF_GPIO_PDDR_LCDDATAL_PDDR_LCDDATAL1      (0x02)
-#define MCF_GPIO_PDDR_LCDDATAL_PDDR_LCDDATAL2      (0x04)
-#define MCF_GPIO_PDDR_LCDDATAL_PDDR_LCDDATAL3      (0x08)
-#define MCF_GPIO_PDDR_LCDDATAL_PDDR_LCDDATAL4      (0x10)
-#define MCF_GPIO_PDDR_LCDDATAL_PDDR_LCDDATAL5      (0x20)
-#define MCF_GPIO_PDDR_LCDDATAL_PDDR_LCDDATAL6      (0x40)
-#define MCF_GPIO_PDDR_LCDDATAL_PDDR_LCDDATAL7      (0x80)
-
-/* Bit definitions and macros for MCF_GPIO_PDDR_LCDCTLH */
-#define MCF_GPIO_PDDR_LCDCTLH_PDDR_LCDCTLH0        (0x01)
-
-/* Bit definitions and macros for MCF_GPIO_PDDR_LCDCTLL */
-#define MCF_GPIO_PDDR_LCDCTLL_PDDR_LCDCTLL0        (0x01)
-#define MCF_GPIO_PDDR_LCDCTLL_PDDR_LCDCTLL1        (0x02)
-#define MCF_GPIO_PDDR_LCDCTLL_PDDR_LCDCTLL2        (0x04)
-#define MCF_GPIO_PDDR_LCDCTLL_PDDR_LCDCTLL3        (0x08)
-#define MCF_GPIO_PDDR_LCDCTLL_PDDR_LCDCTLL4        (0x10)
-#define MCF_GPIO_PDDR_LCDCTLL_PDDR_LCDCTLL5        (0x20)
-#define MCF_GPIO_PDDR_LCDCTLL_PDDR_LCDCTLL6        (0x40)
-#define MCF_GPIO_PDDR_LCDCTLL_PDDR_LCDCTLL7        (0x80)
-
-/* Bit definitions and macros for MCF_GPIO_PPDSDR_FECH */
-#define MCF_GPIO_PPDSDR_FECH_PPDSDR_FECH0          (0x01)
-#define MCF_GPIO_PPDSDR_FECH_PPDSDR_FECH1          (0x02)
-#define MCF_GPIO_PPDSDR_FECH_PPDSDR_FECH2          (0x04)
-#define MCF_GPIO_PPDSDR_FECH_PPDSDR_FECH3          (0x08)
-#define MCF_GPIO_PPDSDR_FECH_PPDSDR_FECH4          (0x10)
-#define MCF_GPIO_PPDSDR_FECH_PPDSDR_FECH5          (0x20)
-#define MCF_GPIO_PPDSDR_FECH_PPDSDR_FECH6          (0x40)
-#define MCF_GPIO_PPDSDR_FECH_PPDSDR_FECH7          (0x80)
-
-/* Bit definitions and macros for MCF_GPIO_PPDSDR_FECL */
-#define MCF_GPIO_PPDSDR_FECL_PPDSDR_FECL0          (0x01)
-#define MCF_GPIO_PPDSDR_FECL_PPDSDR_FECL1          (0x02)
-#define MCF_GPIO_PPDSDR_FECL_PPDSDR_FECL2          (0x04)
-#define MCF_GPIO_PPDSDR_FECL_PPDSDR_FECL3          (0x08)
-#define MCF_GPIO_PPDSDR_FECL_PPDSDR_FECL4          (0x10)
-#define MCF_GPIO_PPDSDR_FECL_PPDSDR_FECL5          (0x20)
-#define MCF_GPIO_PPDSDR_FECL_PPDSDR_FECL6          (0x40)
-#define MCF_GPIO_PPDSDR_FECL_PPDSDR_FECL7          (0x80)
-
-/* Bit definitions and macros for MCF_GPIO_PPDSDR_SSI */
-#define MCF_GPIO_PPDSDR_SSI_PPDSDR_SSI0            (0x01)
-#define MCF_GPIO_PPDSDR_SSI_PPDSDR_SSI1            (0x02)
-#define MCF_GPIO_PPDSDR_SSI_PPDSDR_SSI2            (0x04)
-#define MCF_GPIO_PPDSDR_SSI_PPDSDR_SSI3            (0x08)
-#define MCF_GPIO_PPDSDR_SSI_PPDSDR_SSI4            (0x10)
-
-/* Bit definitions and macros for MCF_GPIO_PPDSDR_BUSCTL */
-#define MCF_GPIO_PPDSDR_BUSCTL_POSDR_BUSCTL0       (0x01)
-#define MCF_GPIO_PPDSDR_BUSCTL_PPDSDR_BUSCTL1      (0x02)
-#define MCF_GPIO_PPDSDR_BUSCTL_PPDSDR_BUSCTL2      (0x04)
-#define MCF_GPIO_PPDSDR_BUSCTL_PPDSDR_BUSCTL3      (0x08)
-
-/* Bit definitions and macros for MCF_GPIO_PPDSDR_BE */
-#define MCF_GPIO_PPDSDR_BE_PPDSDR_BE0              (0x01)
-#define MCF_GPIO_PPDSDR_BE_PPDSDR_BE1              (0x02)
-#define MCF_GPIO_PPDSDR_BE_PPDSDR_BE2              (0x04)
-#define MCF_GPIO_PPDSDR_BE_PPDSDR_BE3              (0x08)
-
-/* Bit definitions and macros for MCF_GPIO_PPDSDR_CS */
-#define MCF_GPIO_PPDSDR_CS_PPDSDR_CS1              (0x02)
-#define MCF_GPIO_PPDSDR_CS_PPDSDR_CS2              (0x04)
-#define MCF_GPIO_PPDSDR_CS_PPDSDR_CS3              (0x08)
-#define MCF_GPIO_PPDSDR_CS_PPDSDR_CS4              (0x10)
-#define MCF_GPIO_PPDSDR_CS_PPDSDR_CS5              (0x20)
-
-/* Bit definitions and macros for MCF_GPIO_PPDSDR_PWM */
-#define MCF_GPIO_PPDSDR_PWM_PPDSDR_PWM2            (0x04)
-#define MCF_GPIO_PPDSDR_PWM_PPDSDR_PWM3            (0x08)
-#define MCF_GPIO_PPDSDR_PWM_PPDSDR_PWM4            (0x10)
-#define MCF_GPIO_PPDSDR_PWM_PPDSDR_PWM5            (0x20)
-
-/* Bit definitions and macros for MCF_GPIO_PPDSDR_FECI2C */
-#define MCF_GPIO_PPDSDR_FECI2C_PPDSDR_FECI2C0      (0x01)
-#define MCF_GPIO_PPDSDR_FECI2C_PPDSDR_FECI2C1      (0x02)
-#define MCF_GPIO_PPDSDR_FECI2C_PPDSDR_FECI2C2      (0x04)
-#define MCF_GPIO_PPDSDR_FECI2C_PPDSDR_FECI2C3      (0x08)
-
-/* Bit definitions and macros for MCF_GPIO_PPDSDR_UART */
-#define MCF_GPIO_PPDSDR_UART_PPDSDR_UART0          (0x01)
-#define MCF_GPIO_PPDSDR_UART_PPDSDR_UART1          (0x02)
-#define MCF_GPIO_PPDSDR_UART_PPDSDR_UART2          (0x04)
-#define MCF_GPIO_PPDSDR_UART_PPDSDR_UART3          (0x08)
-#define MCF_GPIO_PPDSDR_UART_PPDSDR_UART4          (0x10)
-#define MCF_GPIO_PPDSDR_UART_PPDSDR_UART5          (0x20)
-#define MCF_GPIO_PPDSDR_UART_PPDSDR_UART6          (0x40)
-#define MCF_GPIO_PPDSDR_UART_PPDSDR_UART7          (0x80)
-
-/* Bit definitions and macros for MCF_GPIO_PPDSDR_QSPI */
-#define MCF_GPIO_PPDSDR_QSPI_PPDSDR_QSPI0          (0x01)
-#define MCF_GPIO_PPDSDR_QSPI_PPDSDR_QSPI1          (0x02)
-#define MCF_GPIO_PPDSDR_QSPI_PPDSDR_QSPI2          (0x04)
-#define MCF_GPIO_PPDSDR_QSPI_PPDSDR_QSPI3          (0x08)
-#define MCF_GPIO_PPDSDR_QSPI_PPDSDR_QSPI4          (0x10)
-#define MCF_GPIO_PPDSDR_QSPI_PPDSDR_QSPI5          (0x20)
-
-/* Bit definitions and macros for MCF_GPIO_PPDSDR_TIMER */
-#define MCF_GPIO_PPDSDR_TIMER_PPDSDR_TIMER0        (0x01)
-#define MCF_GPIO_PPDSDR_TIMER_PPDSDR_TIMER1        (0x02)
-#define MCF_GPIO_PPDSDR_TIMER_PPDSDR_TIMER2        (0x04)
-#define MCF_GPIO_PPDSDR_TIMER_PPDSDR_TIMER3        (0x08)
-
-/* Bit definitions and macros for MCF_GPIO_PPDSDR_LCDDATAH */
-#define MCF_GPIO_PPDSDR_LCDDATAH_PPDSDR_LCDDATAH0  (0x01)
-#define MCF_GPIO_PPDSDR_LCDDATAH_PPDSDR_LCDDATAH1  (0x02)
-
-/* Bit definitions and macros for MCF_GPIO_PPDSDR_LCDDATAM */
-#define MCF_GPIO_PPDSDR_LCDDATAM_PPDSDR_LCDDATAM0  (0x01)
-#define MCF_GPIO_PPDSDR_LCDDATAM_PPDSDR_LCDDATAM1  (0x02)
-#define MCF_GPIO_PPDSDR_LCDDATAM_PPDSDR_LCDDATAM2  (0x04)
-#define MCF_GPIO_PPDSDR_LCDDATAM_PPDSDR_LCDDATAM3  (0x08)
-#define MCF_GPIO_PPDSDR_LCDDATAM_PPDSDR_LCDDATAM4  (0x10)
-#define MCF_GPIO_PPDSDR_LCDDATAM_PPDSDR_LCDDATAM5  (0x20)
-#define MCF_GPIO_PPDSDR_LCDDATAM_PPDSDR_LCDDATAM6  (0x40)
-#define MCF_GPIO_PPDSDR_LCDDATAM_PPDSDR_LCDDATAM7  (0x80)
-
-/* Bit definitions and macros for MCF_GPIO_PPDSDR_LCDDATAL */
-#define MCF_GPIO_PPDSDR_LCDDATAL_PPDSDR_LCDDATAL0  (0x01)
-#define MCF_GPIO_PPDSDR_LCDDATAL_PPDSDR_LCDDATAL1  (0x02)
-#define MCF_GPIO_PPDSDR_LCDDATAL_PPDSDR_LCDDATAL2  (0x04)
-#define MCF_GPIO_PPDSDR_LCDDATAL_PPDSDR_LCDDATAL3  (0x08)
-#define MCF_GPIO_PPDSDR_LCDDATAL_PPDSDR_LCDDATAL4  (0x10)
-#define MCF_GPIO_PPDSDR_LCDDATAL_PPDSDR_LCDDATAL5  (0x20)
-#define MCF_GPIO_PPDSDR_LCDDATAL_PPDSDR_LCDDATAL6  (0x40)
-#define MCF_GPIO_PPDSDR_LCDDATAL_PPDSDR_LCDDATAL7  (0x80)
-
-/* Bit definitions and macros for MCF_GPIO_PPDSDR_LCDCTLH */
-#define MCF_GPIO_PPDSDR_LCDCTLH_PPDSDR_LCDCTLH0    (0x01)
-
-/* Bit definitions and macros for MCF_GPIO_PPDSDR_LCDCTLL */
-#define MCF_GPIO_PPDSDR_LCDCTLL_PPDSDR_LCDCTLL0    (0x01)
-#define MCF_GPIO_PPDSDR_LCDCTLL_PPDSDR_LCDCTLL1    (0x02)
-#define MCF_GPIO_PPDSDR_LCDCTLL_PPDSDR_LCDCTLL2    (0x04)
-#define MCF_GPIO_PPDSDR_LCDCTLL_PPDSDR_LCDCTLL3    (0x08)
-#define MCF_GPIO_PPDSDR_LCDCTLL_PPDSDR_LCDCTLL4    (0x10)
-#define MCF_GPIO_PPDSDR_LCDCTLL_PPDSDR_LCDCTLL5    (0x20)
-#define MCF_GPIO_PPDSDR_LCDCTLL_PPDSDR_LCDCTLL6    (0x40)
-#define MCF_GPIO_PPDSDR_LCDCTLL_PPDSDR_LCDCTLL7    (0x80)
-
-/* Bit definitions and macros for MCF_GPIO_PCLRR_FECH */
-#define MCF_GPIO_PCLRR_FECH_PCLRR_FECH0            (0x01)
-#define MCF_GPIO_PCLRR_FECH_PCLRR_FECH1            (0x02)
-#define MCF_GPIO_PCLRR_FECH_PCLRR_FECH2            (0x04)
-#define MCF_GPIO_PCLRR_FECH_PCLRR_FECH3            (0x08)
-#define MCF_GPIO_PCLRR_FECH_PCLRR_FECH4            (0x10)
-#define MCF_GPIO_PCLRR_FECH_PCLRR_FECH5            (0x20)
-#define MCF_GPIO_PCLRR_FECH_PCLRR_FECH6            (0x40)
-#define MCF_GPIO_PCLRR_FECH_PCLRR_FECH7            (0x80)
-
-/* Bit definitions and macros for MCF_GPIO_PCLRR_FECL */
-#define MCF_GPIO_PCLRR_FECL_PCLRR_FECL0            (0x01)
-#define MCF_GPIO_PCLRR_FECL_PCLRR_FECL1            (0x02)
-#define MCF_GPIO_PCLRR_FECL_PCLRR_FECL2            (0x04)
-#define MCF_GPIO_PCLRR_FECL_PCLRR_FECL3            (0x08)
-#define MCF_GPIO_PCLRR_FECL_PCLRR_FECL4            (0x10)
-#define MCF_GPIO_PCLRR_FECL_PCLRR_FECL5            (0x20)
-#define MCF_GPIO_PCLRR_FECL_PCLRR_FECL6            (0x40)
-#define MCF_GPIO_PCLRR_FECL_PCLRR_FECL7            (0x80)
-
-/* Bit definitions and macros for MCF_GPIO_PCLRR_SSI */
-#define MCF_GPIO_PCLRR_SSI_PCLRR_SSI0              (0x01)
-#define MCF_GPIO_PCLRR_SSI_PCLRR_SSI1              (0x02)
-#define MCF_GPIO_PCLRR_SSI_PCLRR_SSI2              (0x04)
-#define MCF_GPIO_PCLRR_SSI_PCLRR_SSI3              (0x08)
-#define MCF_GPIO_PCLRR_SSI_PCLRR_SSI4              (0x10)
-
-/* Bit definitions and macros for MCF_GPIO_PCLRR_BUSCTL */
-#define MCF_GPIO_PCLRR_BUSCTL_POSDR_BUSCTL0        (0x01)
-#define MCF_GPIO_PCLRR_BUSCTL_PCLRR_BUSCTL1        (0x02)
-#define MCF_GPIO_PCLRR_BUSCTL_PCLRR_BUSCTL2        (0x04)
-#define MCF_GPIO_PCLRR_BUSCTL_PCLRR_BUSCTL3        (0x08)
-
-/* Bit definitions and macros for MCF_GPIO_PCLRR_BE */
-#define MCF_GPIO_PCLRR_BE_PCLRR_BE0                (0x01)
-#define MCF_GPIO_PCLRR_BE_PCLRR_BE1                (0x02)
-#define MCF_GPIO_PCLRR_BE_PCLRR_BE2                (0x04)
-#define MCF_GPIO_PCLRR_BE_PCLRR_BE3                (0x08)
-
-/* Bit definitions and macros for MCF_GPIO_PCLRR_CS */
-#define MCF_GPIO_PCLRR_CS_PCLRR_CS1                (0x02)
-#define MCF_GPIO_PCLRR_CS_PCLRR_CS2                (0x04)
-#define MCF_GPIO_PCLRR_CS_PCLRR_CS3                (0x08)
-#define MCF_GPIO_PCLRR_CS_PCLRR_CS4                (0x10)
-#define MCF_GPIO_PCLRR_CS_PCLRR_CS5                (0x20)
-
-/* Bit definitions and macros for MCF_GPIO_PCLRR_PWM */
-#define MCF_GPIO_PCLRR_PWM_PCLRR_PWM2              (0x04)
-#define MCF_GPIO_PCLRR_PWM_PCLRR_PWM3              (0x08)
-#define MCF_GPIO_PCLRR_PWM_PCLRR_PWM4              (0x10)
-#define MCF_GPIO_PCLRR_PWM_PCLRR_PWM5              (0x20)
-
-/* Bit definitions and macros for MCF_GPIO_PCLRR_FECI2C */
-#define MCF_GPIO_PCLRR_FECI2C_PCLRR_FECI2C0        (0x01)
-#define MCF_GPIO_PCLRR_FECI2C_PCLRR_FECI2C1        (0x02)
-#define MCF_GPIO_PCLRR_FECI2C_PCLRR_FECI2C2        (0x04)
-#define MCF_GPIO_PCLRR_FECI2C_PCLRR_FECI2C3        (0x08)
-
-/* Bit definitions and macros for MCF_GPIO_PCLRR_UART */
-#define MCF_GPIO_PCLRR_UART_PCLRR_UART0            (0x01)
-#define MCF_GPIO_PCLRR_UART_PCLRR_UART1            (0x02)
-#define MCF_GPIO_PCLRR_UART_PCLRR_UART2            (0x04)
-#define MCF_GPIO_PCLRR_UART_PCLRR_UART3            (0x08)
-#define MCF_GPIO_PCLRR_UART_PCLRR_UART4            (0x10)
-#define MCF_GPIO_PCLRR_UART_PCLRR_UART5            (0x20)
-#define MCF_GPIO_PCLRR_UART_PCLRR_UART6            (0x40)
-#define MCF_GPIO_PCLRR_UART_PCLRR_UART7            (0x80)
-
-/* Bit definitions and macros for MCF_GPIO_PCLRR_QSPI */
-#define MCF_GPIO_PCLRR_QSPI_PCLRR_QSPI0            (0x01)
-#define MCF_GPIO_PCLRR_QSPI_PCLRR_QSPI1            (0x02)
-#define MCF_GPIO_PCLRR_QSPI_PCLRR_QSPI2            (0x04)
-#define MCF_GPIO_PCLRR_QSPI_PCLRR_QSPI3            (0x08)
-#define MCF_GPIO_PCLRR_QSPI_PCLRR_QSPI4            (0x10)
-#define MCF_GPIO_PCLRR_QSPI_PCLRR_QSPI5            (0x20)
-
-/* Bit definitions and macros for MCF_GPIO_PCLRR_TIMER */
-#define MCF_GPIO_PCLRR_TIMER_PCLRR_TIMER0          (0x01)
-#define MCF_GPIO_PCLRR_TIMER_PCLRR_TIMER1          (0x02)
-#define MCF_GPIO_PCLRR_TIMER_PCLRR_TIMER2          (0x04)
-#define MCF_GPIO_PCLRR_TIMER_PCLRR_TIMER3          (0x08)
-
-/* Bit definitions and macros for MCF_GPIO_PCLRR_LCDDATAH */
-#define MCF_GPIO_PCLRR_LCDDATAH_PCLRR_LCDDATAH0    (0x01)
-#define MCF_GPIO_PCLRR_LCDDATAH_PCLRR_LCDDATAH1    (0x02)
-
-/* Bit definitions and macros for MCF_GPIO_PCLRR_LCDDATAM */
-#define MCF_GPIO_PCLRR_LCDDATAM_PCLRR_LCDDATAM0    (0x01)
-#define MCF_GPIO_PCLRR_LCDDATAM_PCLRR_LCDDATAM1    (0x02)
-#define MCF_GPIO_PCLRR_LCDDATAM_PCLRR_LCDDATAM2    (0x04)
-#define MCF_GPIO_PCLRR_LCDDATAM_PCLRR_LCDDATAM3    (0x08)
-#define MCF_GPIO_PCLRR_LCDDATAM_PCLRR_LCDDATAM4    (0x10)
-#define MCF_GPIO_PCLRR_LCDDATAM_PCLRR_LCDDATAM5    (0x20)
-#define MCF_GPIO_PCLRR_LCDDATAM_PCLRR_LCDDATAM6    (0x40)
-#define MCF_GPIO_PCLRR_LCDDATAM_PCLRR_LCDDATAM7    (0x80)
-
-/* Bit definitions and macros for MCF_GPIO_PCLRR_LCDDATAL */
-#define MCF_GPIO_PCLRR_LCDDATAL_PCLRR_LCDDATAL0    (0x01)
-#define MCF_GPIO_PCLRR_LCDDATAL_PCLRR_LCDDATAL1    (0x02)
-#define MCF_GPIO_PCLRR_LCDDATAL_PCLRR_LCDDATAL2    (0x04)
-#define MCF_GPIO_PCLRR_LCDDATAL_PCLRR_LCDDATAL3    (0x08)
-#define MCF_GPIO_PCLRR_LCDDATAL_PCLRR_LCDDATAL4    (0x10)
-#define MCF_GPIO_PCLRR_LCDDATAL_PCLRR_LCDDATAL5    (0x20)
-#define MCF_GPIO_PCLRR_LCDDATAL_PCLRR_LCDDATAL6    (0x40)
-#define MCF_GPIO_PCLRR_LCDDATAL_PCLRR_LCDDATAL7    (0x80)
-
-/* Bit definitions and macros for MCF_GPIO_PCLRR_LCDCTLH */
-#define MCF_GPIO_PCLRR_LCDCTLH_PCLRR_LCDCTLH0      (0x01)
-
-/* Bit definitions and macros for MCF_GPIO_PCLRR_LCDCTLL */
-#define MCF_GPIO_PCLRR_LCDCTLL_PCLRR_LCDCTLL0      (0x01)
-#define MCF_GPIO_PCLRR_LCDCTLL_PCLRR_LCDCTLL1      (0x02)
-#define MCF_GPIO_PCLRR_LCDCTLL_PCLRR_LCDCTLL2      (0x04)
-#define MCF_GPIO_PCLRR_LCDCTLL_PCLRR_LCDCTLL3      (0x08)
-#define MCF_GPIO_PCLRR_LCDCTLL_PCLRR_LCDCTLL4      (0x10)
-#define MCF_GPIO_PCLRR_LCDCTLL_PCLRR_LCDCTLL5      (0x20)
-#define MCF_GPIO_PCLRR_LCDCTLL_PCLRR_LCDCTLL6      (0x40)
-#define MCF_GPIO_PCLRR_LCDCTLL_PCLRR_LCDCTLL7      (0x80)
-
-/* Bit definitions and macros for MCF_GPIO_PAR_FEC */
-#define MCF_GPIO_PAR_FEC_PAR_FEC_MII(x)            (((x)&0x03)<<0)
-#define MCF_GPIO_PAR_FEC_PAR_FEC_7W(x)             (((x)&0x03)<<2)
-#define MCF_GPIO_PAR_FEC_PAR_FEC_7W_GPIO           (0x00)
-#define MCF_GPIO_PAR_FEC_PAR_FEC_7W_URTS1          (0x04)
-#define MCF_GPIO_PAR_FEC_PAR_FEC_7W_FEC            (0x0C)
-#define MCF_GPIO_PAR_FEC_PAR_FEC_MII_GPIO          (0x00)
-#define MCF_GPIO_PAR_FEC_PAR_FEC_MII_UART          (0x01)
-#define MCF_GPIO_PAR_FEC_PAR_FEC_MII_FEC           (0x03)
-
-/* Bit definitions and macros for MCF_GPIO_PAR_PWM */
-#define MCF_GPIO_PAR_PWM_PAR_PWM1(x)               (((x)&0x03)<<0)
-#define MCF_GPIO_PAR_PWM_PAR_PWM3(x)               (((x)&0x03)<<2)
-#define MCF_GPIO_PAR_PWM_PAR_PWM5                  (0x10)
-#define MCF_GPIO_PAR_PWM_PAR_PWM7                  (0x20)
-
-/* Bit definitions and macros for MCF_GPIO_PAR_BUSCTL */
-#define MCF_GPIO_PAR_BUSCTL_PAR_TS(x)              (((x)&0x03)<<3)
-#define MCF_GPIO_PAR_BUSCTL_PAR_RWB                (0x20)
-#define MCF_GPIO_PAR_BUSCTL_PAR_TA                 (0x40)
-#define MCF_GPIO_PAR_BUSCTL_PAR_OE                 (0x80)
-#define MCF_GPIO_PAR_BUSCTL_PAR_OE_GPIO            (0x00)
-#define MCF_GPIO_PAR_BUSCTL_PAR_OE_OE              (0x80)
-#define MCF_GPIO_PAR_BUSCTL_PAR_TA_GPIO            (0x00)
-#define MCF_GPIO_PAR_BUSCTL_PAR_TA_TA              (0x40)
-#define MCF_GPIO_PAR_BUSCTL_PAR_RWB_GPIO           (0x00)
-#define MCF_GPIO_PAR_BUSCTL_PAR_RWB_RWB            (0x20)
-#define MCF_GPIO_PAR_BUSCTL_PAR_TS_GPIO            (0x00)
-#define MCF_GPIO_PAR_BUSCTL_PAR_TS_DACK0           (0x10)
-#define MCF_GPIO_PAR_BUSCTL_PAR_TS_TS              (0x18)
-
-/* Bit definitions and macros for MCF_GPIO_PAR_FECI2C */
-#define MCF_GPIO_PAR_FECI2C_PAR_SDA(x)             (((x)&0x03)<<0)
-#define MCF_GPIO_PAR_FECI2C_PAR_SCL(x)             (((x)&0x03)<<2)
-#define MCF_GPIO_PAR_FECI2C_PAR_MDIO(x)            (((x)&0x03)<<4)
-#define MCF_GPIO_PAR_FECI2C_PAR_MDC(x)             (((x)&0x03)<<6)
-#define MCF_GPIO_PAR_FECI2C_PAR_MDC_GPIO           (0x00)
-#define MCF_GPIO_PAR_FECI2C_PAR_MDC_UTXD2          (0x40)
-#define MCF_GPIO_PAR_FECI2C_PAR_MDC_SCL            (0x80)
-#define MCF_GPIO_PAR_FECI2C_PAR_MDC_EMDC           (0xC0)
-#define MCF_GPIO_PAR_FECI2C_PAR_MDIO_GPIO          (0x00)
-#define MCF_GPIO_PAR_FECI2C_PAR_MDIO_URXD2         (0x10)
-#define MCF_GPIO_PAR_FECI2C_PAR_MDIO_SDA           (0x20)
-#define MCF_GPIO_PAR_FECI2C_PAR_MDIO_EMDIO         (0x30)
-#define MCF_GPIO_PAR_FECI2C_PAR_SCL_GPIO           (0x00)
-#define MCF_GPIO_PAR_FECI2C_PAR_SCL_UTXD2          (0x04)
-#define MCF_GPIO_PAR_FECI2C_PAR_SCL_SCL            (0x0C)
-#define MCF_GPIO_PAR_FECI2C_PAR_SDA_GPIO           (0x00)
-#define MCF_GPIO_PAR_FECI2C_PAR_SDA_URXD2          (0x02)
-#define MCF_GPIO_PAR_FECI2C_PAR_SDA_SDA            (0x03)
-
-/* Bit definitions and macros for MCF_GPIO_PAR_BE */
-#define MCF_GPIO_PAR_BE_PAR_BE0                    (0x01)
-#define MCF_GPIO_PAR_BE_PAR_BE1                    (0x02)
-#define MCF_GPIO_PAR_BE_PAR_BE2                    (0x04)
-#define MCF_GPIO_PAR_BE_PAR_BE3                    (0x08)
-
-/* Bit definitions and macros for MCF_GPIO_PAR_CS */
-#define MCF_GPIO_PAR_CS_PAR_CS1                    (0x02)
-#define MCF_GPIO_PAR_CS_PAR_CS2                    (0x04)
-#define MCF_GPIO_PAR_CS_PAR_CS3                    (0x08)
-#define MCF_GPIO_PAR_CS_PAR_CS4                    (0x10)
-#define MCF_GPIO_PAR_CS_PAR_CS5                    (0x20)
-#define MCF_GPIO_PAR_CS_PAR_CS_CS1_GPIO            (0x00)
-#define MCF_GPIO_PAR_CS_PAR_CS_CS1_SDCS1           (0x01)
-#define MCF_GPIO_PAR_CS_PAR_CS_CS1_CS1             (0x03)
-
-/* Bit definitions and macros for MCF_GPIO_PAR_SSI */
-#define MCF_GPIO_PAR_SSI_PAR_MCLK                  (0x0080)
-#define MCF_GPIO_PAR_SSI_PAR_TXD(x)                (((x)&0x0003)<<8)
-#define MCF_GPIO_PAR_SSI_PAR_RXD(x)                (((x)&0x0003)<<10)
-#define MCF_GPIO_PAR_SSI_PAR_FS(x)                 (((x)&0x0003)<<12)
-#define MCF_GPIO_PAR_SSI_PAR_BCLK(x)               (((x)&0x0003)<<14)
-
-/* Bit definitions and macros for MCF_GPIO_PAR_UART */
-#define MCF_GPIO_PAR_UART_PAR_UTXD0                (0x0001)
-#define MCF_GPIO_PAR_UART_PAR_URXD0                (0x0002)
-#define MCF_GPIO_PAR_UART_PAR_URTS0                (0x0004)
-#define MCF_GPIO_PAR_UART_PAR_UCTS0                (0x0008)
-#define MCF_GPIO_PAR_UART_PAR_UTXD1(x)             (((x)&0x0003)<<4)
-#define MCF_GPIO_PAR_UART_PAR_URXD1(x)             (((x)&0x0003)<<6)
-#define MCF_GPIO_PAR_UART_PAR_URTS1(x)             (((x)&0x0003)<<8)
-#define MCF_GPIO_PAR_UART_PAR_UCTS1(x)             (((x)&0x0003)<<10)
-#define MCF_GPIO_PAR_UART_PAR_UCTS1_GPIO           (0x0000)
-#define MCF_GPIO_PAR_UART_PAR_UCTS1_SSI_BCLK       (0x0800)
-#define MCF_GPIO_PAR_UART_PAR_UCTS1_ULPI_D7        (0x0400)
-#define MCF_GPIO_PAR_UART_PAR_UCTS1_UCTS1          (0x0C00)
-#define MCF_GPIO_PAR_UART_PAR_URTS1_GPIO           (0x0000)
-#define MCF_GPIO_PAR_UART_PAR_URTS1_SSI_FS         (0x0200)
-#define MCF_GPIO_PAR_UART_PAR_URTS1_ULPI_D6        (0x0100)
-#define MCF_GPIO_PAR_UART_PAR_URTS1_URTS1          (0x0300)
-#define MCF_GPIO_PAR_UART_PAR_URXD1_GPIO           (0x0000)
-#define MCF_GPIO_PAR_UART_PAR_URXD1_SSI_RXD        (0x0080)
-#define MCF_GPIO_PAR_UART_PAR_URXD1_ULPI_D5        (0x0040)
-#define MCF_GPIO_PAR_UART_PAR_URXD1_URXD1          (0x00C0)
-#define MCF_GPIO_PAR_UART_PAR_UTXD1_GPIO           (0x0000)
-#define MCF_GPIO_PAR_UART_PAR_UTXD1_SSI_TXD        (0x0020)
-#define MCF_GPIO_PAR_UART_PAR_UTXD1_ULPI_D4        (0x0010)
-#define MCF_GPIO_PAR_UART_PAR_UTXD1_UTXD1          (0x0030)
-
-/* Bit definitions and macros for MCF_GPIO_PAR_QSPI */
-#define MCF_GPIO_PAR_QSPI_PAR_SCK(x)               (((x)&0x0003)<<4)
-#define MCF_GPIO_PAR_QSPI_PAR_DOUT(x)              (((x)&0x0003)<<6)
-#define MCF_GPIO_PAR_QSPI_PAR_DIN(x)               (((x)&0x0003)<<8)
-#define MCF_GPIO_PAR_QSPI_PAR_PCS0(x)              (((x)&0x0003)<<10)
-#define MCF_GPIO_PAR_QSPI_PAR_PCS1(x)              (((x)&0x0003)<<12)
-#define MCF_GPIO_PAR_QSPI_PAR_PCS2(x)              (((x)&0x0003)<<14)
-
-/* Bit definitions and macros for MCF_GPIO_PAR_TIMER */
-#define MCF_GPIO_PAR_TIMER_PAR_TIN0(x)             (((x)&0x03)<<0)
-#define MCF_GPIO_PAR_TIMER_PAR_TIN1(x)             (((x)&0x03)<<2)
-#define MCF_GPIO_PAR_TIMER_PAR_TIN2(x)             (((x)&0x03)<<4)
-#define MCF_GPIO_PAR_TIMER_PAR_TIN3(x)             (((x)&0x03)<<6)
-#define MCF_GPIO_PAR_TIMER_PAR_TIN3_GPIO           (0x00)
-#define MCF_GPIO_PAR_TIMER_PAR_TIN3_TOUT3          (0x80)
-#define MCF_GPIO_PAR_TIMER_PAR_TIN3_URXD2          (0x40)
-#define MCF_GPIO_PAR_TIMER_PAR_TIN3_TIN3           (0xC0)
-#define MCF_GPIO_PAR_TIMER_PAR_TIN2_GPIO           (0x00)
-#define MCF_GPIO_PAR_TIMER_PAR_TIN2_TOUT2          (0x20)
-#define MCF_GPIO_PAR_TIMER_PAR_TIN2_UTXD2          (0x10)
-#define MCF_GPIO_PAR_TIMER_PAR_TIN2_TIN2           (0x30)
-#define MCF_GPIO_PAR_TIMER_PAR_TIN1_GPIO           (0x00)
-#define MCF_GPIO_PAR_TIMER_PAR_TIN1_TOUT1          (0x08)
-#define MCF_GPIO_PAR_TIMER_PAR_TIN1_DACK1          (0x04)
-#define MCF_GPIO_PAR_TIMER_PAR_TIN1_TIN1           (0x0C)
-#define MCF_GPIO_PAR_TIMER_PAR_TIN0_GPIO           (0x00)
-#define MCF_GPIO_PAR_TIMER_PAR_TIN0_TOUT0          (0x02)
-#define MCF_GPIO_PAR_TIMER_PAR_TIN0_DREQ0          (0x01)
-#define MCF_GPIO_PAR_TIMER_PAR_TIN0_TIN0           (0x03)
-
-/* Bit definitions and macros for MCF_GPIO_PAR_LCDDATA */
-#define MCF_GPIO_PAR_LCDDATA_PAR_LD7_0(x)          (((x)&0x03)<<0)
-#define MCF_GPIO_PAR_LCDDATA_PAR_LD15_8(x)         (((x)&0x03)<<2)
-#define MCF_GPIO_PAR_LCDDATA_PAR_LD16(x)           (((x)&0x03)<<4)
-#define MCF_GPIO_PAR_LCDDATA_PAR_LD17(x)           (((x)&0x03)<<6)
-
-/* Bit definitions and macros for MCF_GPIO_PAR_LCDCTL */
-#define MCF_GPIO_PAR_LCDCTL_PAR_CLS                (0x0001)
-#define MCF_GPIO_PAR_LCDCTL_PAR_PS                 (0x0002)
-#define MCF_GPIO_PAR_LCDCTL_PAR_REV                (0x0004)
-#define MCF_GPIO_PAR_LCDCTL_PAR_SPL_SPR            (0x0008)
-#define MCF_GPIO_PAR_LCDCTL_PAR_CONTRAST           (0x0010)
-#define MCF_GPIO_PAR_LCDCTL_PAR_LSCLK              (0x0020)
-#define MCF_GPIO_PAR_LCDCTL_PAR_LP_HSYNC           (0x0040)
-#define MCF_GPIO_PAR_LCDCTL_PAR_FLM_VSYNC          (0x0080)
-#define MCF_GPIO_PAR_LCDCTL_PAR_ACD_OE             (0x0100)
-
-/* Bit definitions and macros for MCF_GPIO_PAR_IRQ */
-#define MCF_GPIO_PAR_IRQ_PAR_IRQ1(x)               (((x)&0x0003)<<4)
-#define MCF_GPIO_PAR_IRQ_PAR_IRQ2(x)               (((x)&0x0003)<<6)
-#define MCF_GPIO_PAR_IRQ_PAR_IRQ4(x)               (((x)&0x0003)<<8)
-#define MCF_GPIO_PAR_IRQ_PAR_IRQ5(x)               (((x)&0x0003)<<10)
-#define MCF_GPIO_PAR_IRQ_PAR_IRQ6(x)               (((x)&0x0003)<<12)
-
-/* Bit definitions and macros for MCF_GPIO_MSCR_FLEXBUS */
-#define MCF_GPIO_MSCR_FLEXBUS_MSCR_ADDRCTL(x)      (((x)&0x03)<<0)
-#define MCF_GPIO_MSCR_FLEXBUS_MSCR_DLOWER(x)       (((x)&0x03)<<2)
-#define MCF_GPIO_MSCR_FLEXBUS_MSCR_DUPPER(x)       (((x)&0x03)<<4)
-
-/* Bit definitions and macros for MCF_GPIO_MSCR_SDRAM */
-#define MCF_GPIO_MSCR_SDRAM_MSCR_SDRAM(x)          (((x)&0x03)<<0)
-#define MCF_GPIO_MSCR_SDRAM_MSCR_SDCLK(x)          (((x)&0x03)<<2)
-#define MCF_GPIO_MSCR_SDRAM_MSCR_SDCLKB(x)         (((x)&0x03)<<4)
-
-/* Bit definitions and macros for MCF_GPIO_DSCR_I2C */
-#define MCF_GPIO_DSCR_I2C_I2C_DSE(x)               (((x)&0x03)<<0)
-
-/* Bit definitions and macros for MCF_GPIO_DSCR_PWM */
-#define MCF_GPIO_DSCR_PWM_PWM_DSE(x)               (((x)&0x03)<<0)
-
-/* Bit definitions and macros for MCF_GPIO_DSCR_FEC */
-#define MCF_GPIO_DSCR_FEC_FEC_DSE(x)               (((x)&0x03)<<0)
-
-/* Bit definitions and macros for MCF_GPIO_DSCR_UART */
-#define MCF_GPIO_DSCR_UART_UART0_DSE(x)            (((x)&0x03)<<0)
-#define MCF_GPIO_DSCR_UART_UART1_DSE(x)            (((x)&0x03)<<2)
-
-/* Bit definitions and macros for MCF_GPIO_DSCR_QSPI */
-#define MCF_GPIO_DSCR_QSPI_QSPI_DSE(x)             (((x)&0x03)<<0)
-
-/* Bit definitions and macros for MCF_GPIO_DSCR_TIMER */
-#define MCF_GPIO_DSCR_TIMER_TIMER_DSE(x)           (((x)&0x03)<<0)
-
-/* Bit definitions and macros for MCF_GPIO_DSCR_SSI */
-#define MCF_GPIO_DSCR_SSI_SSI_DSE(x)               (((x)&0x03)<<0)
-
-/* Bit definitions and macros for MCF_GPIO_DSCR_LCD */
-#define MCF_GPIO_DSCR_LCD_LCD_DSE(x)               (((x)&0x03)<<0)
-
-/* Bit definitions and macros for MCF_GPIO_DSCR_DEBUG */
-#define MCF_GPIO_DSCR_DEBUG_DEBUG_DSE(x)           (((x)&0x03)<<0)
-
-/* Bit definitions and macros for MCF_GPIO_DSCR_CLKRST */
-#define MCF_GPIO_DSCR_CLKRST_CLKRST_DSE(x)         (((x)&0x03)<<0)
-
-/* Bit definitions and macros for MCF_GPIO_DSCR_IRQ */
-#define MCF_GPIO_DSCR_IRQ_IRQ_DSE(x)               (((x)&0x03)<<0)
-
-/*
- * Generic GPIO support
- */
-#define MCFGPIO_PODR                   MCFGPIO_PODR_FECH
-#define MCFGPIO_PDDR                   MCFGPIO_PDDR_FECH
-#define MCFGPIO_PPDR                   MCFGPIO_PPDSDR_FECH
-#define MCFGPIO_SETR                   MCFGPIO_PPDSDR_FECH
-#define MCFGPIO_CLRR                   MCFGPIO_PCLRR_FECH
-
-#define MCFGPIO_PIN_MAX                        136
-#define MCFGPIO_IRQ_MAX                        8
-#define MCFGPIO_IRQ_VECBASE            MCFINT_VECBASE
-
-/*********************************************************************
- *
- * Phase Locked Loop (PLL)
- *
- *********************************************************************/
-
-/* Register read/write macros */
-#define MCF_PLL_PODR              0xFC0C0000
-#define MCF_PLL_PLLCR             0xFC0C0004
-#define MCF_PLL_PMDR              0xFC0C0008
-#define MCF_PLL_PFDR              0xFC0C000C
-
-/* Bit definitions and macros for MCF_PLL_PODR */
-#define MCF_PLL_PODR_BUSDIV(x)    (((x)&0x0F)<<0)
-#define MCF_PLL_PODR_CPUDIV(x)    (((x)&0x0F)<<4)
-
-/* Bit definitions and macros for MCF_PLL_PLLCR */
-#define MCF_PLL_PLLCR_DITHDEV(x)  (((x)&0x07)<<0)
-#define MCF_PLL_PLLCR_DITHEN      (0x80)
-
-/* Bit definitions and macros for MCF_PLL_PMDR */
-#define MCF_PLL_PMDR_MODDIV(x)    (((x)&0xFF)<<0)
-
-/* Bit definitions and macros for MCF_PLL_PFDR */
-#define MCF_PLL_PFDR_MFD(x)       (((x)&0xFF)<<0)
-
-/*********************************************************************
- *
- * System Control Module Registers (SCM)
- *
- *********************************************************************/
-
-/* Register read/write macros */
-#define MCF_SCM_MPR                    0xFC000000
-#define MCF_SCM_PACRA                  0xFC000020
-#define MCF_SCM_PACRB                  0xFC000024
-#define MCF_SCM_PACRC                  0xFC000028
-#define MCF_SCM_PACRD                  0xFC00002C
-#define MCF_SCM_PACRE                  0xFC000040
-#define MCF_SCM_PACRF                  0xFC000044
-
-#define MCF_SCM_BCR                    0xFC040024
-
-/*********************************************************************
- *
- * SDRAM Controller (SDRAMC)
- *
- *********************************************************************/
-
-/* Register read/write macros */
-#define MCF_SDRAMC_SDMR                        0xFC0B8000
-#define MCF_SDRAMC_SDCR                        0xFC0B8004
-#define MCF_SDRAMC_SDCFG1              0xFC0B8008
-#define MCF_SDRAMC_SDCFG2              0xFC0B800C
-#define MCF_SDRAMC_LIMP_FIX            0xFC0B8080
-#define MCF_SDRAMC_SDDS                        0xFC0B8100
-#define MCF_SDRAMC_SDCS0               0xFC0B8110
-#define MCF_SDRAMC_SDCS1               0xFC0B8114
-#define MCF_SDRAMC_SDCS2               0xFC0B8118
-#define MCF_SDRAMC_SDCS3               0xFC0B811C
-
-/* Bit definitions and macros for MCF_SDRAMC_SDMR */
-#define MCF_SDRAMC_SDMR_CMD            (0x00010000)
-#define MCF_SDRAMC_SDMR_AD(x)          (((x)&0x00000FFF)<<18)
-#define MCF_SDRAMC_SDMR_BNKAD(x)       (((x)&0x00000003)<<30)
-#define MCF_SDRAMC_SDMR_BNKAD_LMR      (0x00000000)
-#define MCF_SDRAMC_SDMR_BNKAD_LEMR     (0x40000000)
-
-/* Bit definitions and macros for MCF_SDRAMC_SDCR */
-#define MCF_SDRAMC_SDCR_IPALL          (0x00000002)
-#define MCF_SDRAMC_SDCR_IREF           (0x00000004)
-#define MCF_SDRAMC_SDCR_DQS_OE(x)      (((x)&0x0000000F)<<8)
-#define MCF_SDRAMC_SDCR_PS(x)          (((x)&0x00000003)<<12)
-#define MCF_SDRAMC_SDCR_RCNT(x)                (((x)&0x0000003F)<<16)
-#define MCF_SDRAMC_SDCR_OE_RULE                (0x00400000)
-#define MCF_SDRAMC_SDCR_MUX(x)         (((x)&0x00000003)<<24)
-#define MCF_SDRAMC_SDCR_REF            (0x10000000)
-#define MCF_SDRAMC_SDCR_DDR            (0x20000000)
-#define MCF_SDRAMC_SDCR_CKE            (0x40000000)
-#define MCF_SDRAMC_SDCR_MODE_EN                (0x80000000)
-#define MCF_SDRAMC_SDCR_PS_16          (0x00002000)
-#define MCF_SDRAMC_SDCR_PS_32          (0x00000000)
-
-/* Bit definitions and macros for MCF_SDRAMC_SDCFG1 */
-#define MCF_SDRAMC_SDCFG1_WTLAT(x)     (((x)&0x00000007)<<4)
-#define MCF_SDRAMC_SDCFG1_REF2ACT(x)   (((x)&0x0000000F)<<8)
-#define MCF_SDRAMC_SDCFG1_PRE2ACT(x)   (((x)&0x00000007)<<12)
-#define MCF_SDRAMC_SDCFG1_ACT2RW(x)    (((x)&0x00000007)<<16)
-#define MCF_SDRAMC_SDCFG1_RDLAT(x)     (((x)&0x0000000F)<<20)
-#define MCF_SDRAMC_SDCFG1_SWT2RD(x)    (((x)&0x00000007)<<24)
-#define MCF_SDRAMC_SDCFG1_SRD2RW(x)    (((x)&0x0000000F)<<28)
-
-/* Bit definitions and macros for MCF_SDRAMC_SDCFG2 */
-#define MCF_SDRAMC_SDCFG2_BL(x)                (((x)&0x0000000F)<<16)
-#define MCF_SDRAMC_SDCFG2_BRD2WT(x)    (((x)&0x0000000F)<<20)
-#define MCF_SDRAMC_SDCFG2_BWT2RW(x)    (((x)&0x0000000F)<<24)
-#define MCF_SDRAMC_SDCFG2_BRD2PRE(x)   (((x)&0x0000000F)<<28)
-
-/* Device Errata - LIMP mode work around */
-#define MCF_SDRAMC_REFRESH             (0x40000000)
-
-/* Bit definitions and macros for MCF_SDRAMC_SDDS */
-#define MCF_SDRAMC_SDDS_SB_D(x)                (((x)&0x00000003)<<0)
-#define MCF_SDRAMC_SDDS_SB_S(x)                (((x)&0x00000003)<<2)
-#define MCF_SDRAMC_SDDS_SB_A(x)                (((x)&0x00000003)<<4)
-#define MCF_SDRAMC_SDDS_SB_C(x)                (((x)&0x00000003)<<6)
-#define MCF_SDRAMC_SDDS_SB_E(x)                (((x)&0x00000003)<<8)
-
-/* Bit definitions and macros for MCF_SDRAMC_SDCS */
-#define MCF_SDRAMC_SDCS_CSSZ(x)                (((x)&0x0000001F)<<0)
-#define MCF_SDRAMC_SDCS_BASE(x)                (((x)&0x00000FFF)<<20)
-#define MCF_SDRAMC_SDCS_BA(x)          ((x)&0xFFF00000)
-#define MCF_SDRAMC_SDCS_CSSZ_DIABLE    (0x00000000)
-#define MCF_SDRAMC_SDCS_CSSZ_1MBYTE    (0x00000013)
-#define MCF_SDRAMC_SDCS_CSSZ_2MBYTE    (0x00000014)
-#define MCF_SDRAMC_SDCS_CSSZ_4MBYTE    (0x00000015)
-#define MCF_SDRAMC_SDCS_CSSZ_8MBYTE    (0x00000016)
-#define MCF_SDRAMC_SDCS_CSSZ_16MBYTE   (0x00000017)
-#define MCF_SDRAMC_SDCS_CSSZ_32MBYTE   (0x00000018)
-#define MCF_SDRAMC_SDCS_CSSZ_64MBYTE   (0x00000019)
-#define MCF_SDRAMC_SDCS_CSSZ_128MBYTE  (0x0000001A)
-#define MCF_SDRAMC_SDCS_CSSZ_256MBYTE  (0x0000001B)
-#define MCF_SDRAMC_SDCS_CSSZ_512MBYTE  (0x0000001C)
-#define MCF_SDRAMC_SDCS_CSSZ_1GBYTE    (0x0000001D)
-#define MCF_SDRAMC_SDCS_CSSZ_2GBYTE    (0x0000001E)
-#define MCF_SDRAMC_SDCS_CSSZ_4GBYTE    (0x0000001F)
-
-/*
- * Edge Port Module (EPORT)
- */
-#define MCFEPORT_EPPAR                (0xFC094000)
-#define MCFEPORT_EPDDR                (0xFC094002)
-#define MCFEPORT_EPIER                (0xFC094003)
-#define MCFEPORT_EPDR                 (0xFC094004)
-#define MCFEPORT_EPPDR                (0xFC094005)
-#define MCFEPORT_EPFR                 (0xFC094006)
-
-/********************************************************************/
-#endif /* m532xsim_h */
index cd952b0a8bd32d81db35ef6382b9baa0af2bf410..3177ce8331d6916d5c83362f6810d4fd28728cd5 100644
@@ -55,8 +55,8 @@
 #define        CACHE_SIZE      0x2000          /* 8k of unified cache */
 #define        ICACHE_SIZE     CACHE_SIZE
 #define        DCACHE_SIZE     CACHE_SIZE
-#elif defined(CONFIG_M532x)
-#define        CACHE_SIZE      0x4000          /* 32k of unified cache */
+#elif defined(CONFIG_M53xx)
+#define        CACHE_SIZE      0x4000          /* 16k of unified cache */
 #define        ICACHE_SIZE     CACHE_SIZE
 #define        DCACHE_SIZE     CACHE_SIZE
 #endif
diff --git a/arch/m68k/include/asm/m53xxsim.h b/arch/m68k/include/asm/m53xxsim.h
new file mode 100644
index 0000000..faa1a21
--- /dev/null
@@ -0,0 +1,1241 @@
+/****************************************************************************/
+
+/*
+ *     m53xxsim.h -- ColdFire 5329 registers
+ */
+
+/****************************************************************************/
+#ifndef        m53xxsim_h
+#define        m53xxsim_h
+/****************************************************************************/
+
+#define        CPU_NAME                "COLDFIRE(m53xx)"
+#define        CPU_INSTR_PER_JIFFY     3
+#define        MCF_BUSCLK              (MCF_CLK / 3)
+
+#include <asm/m53xxacr.h>
+
+#define MCFINT_VECBASE      64
+#define MCFINT_UART0        26          /* Interrupt number for UART0 */
+#define MCFINT_UART1        27          /* Interrupt number for UART1 */
+#define MCFINT_UART2        28          /* Interrupt number for UART2 */
+#define MCFINT_QSPI         31          /* Interrupt number for QSPI */
+#define MCFINT_FECRX0      36          /* Interrupt number for FEC */
+#define MCFINT_FECTX0      40          /* Interrupt number for FEC */
+#define MCFINT_FECENTC0            42          /* Interrupt number for FEC */
+
+#define MCF_IRQ_UART0       (MCFINT_VECBASE + MCFINT_UART0)
+#define MCF_IRQ_UART1       (MCFINT_VECBASE + MCFINT_UART1)
+#define MCF_IRQ_UART2       (MCFINT_VECBASE + MCFINT_UART2)
+
+#define MCF_IRQ_FECRX0     (MCFINT_VECBASE + MCFINT_FECRX0)
+#define MCF_IRQ_FECTX0     (MCFINT_VECBASE + MCFINT_FECTX0)
+#define MCF_IRQ_FECENTC0    (MCFINT_VECBASE + MCFINT_FECENTC0)
+
+#define        MCF_IRQ_QSPI        (MCFINT_VECBASE + MCFINT_QSPI)
+
+#define MCF_WTM_WCR            0xFC098000
+
+/*
+ *     Define the 532x SIM register set addresses.
+ */
+#define        MCFSIM_IPRL             0xFC048004
+#define        MCFSIM_IPRH             0xFC048000
+#define        MCFSIM_IPR              MCFSIM_IPRL
+#define        MCFSIM_IMRL             0xFC04800C
+#define        MCFSIM_IMRH             0xFC048008
+#define        MCFSIM_IMR              MCFSIM_IMRL
+#define        MCFSIM_ICR0             0xFC048040      
+#define        MCFSIM_ICR1             0xFC048041      
+#define        MCFSIM_ICR2             0xFC048042      
+#define        MCFSIM_ICR3             0xFC048043      
+#define        MCFSIM_ICR4             0xFC048044      
+#define        MCFSIM_ICR5             0xFC048045      
+#define        MCFSIM_ICR6             0xFC048046      
+#define        MCFSIM_ICR7             0xFC048047      
+#define        MCFSIM_ICR8             0xFC048048      
+#define        MCFSIM_ICR9             0xFC048049      
+#define        MCFSIM_ICR10            0xFC04804A
+#define        MCFSIM_ICR11            0xFC04804B
+
+/*
+ *     Some symbol defines for the above...
+ */
+#define        MCFSIM_SWDICR           MCFSIM_ICR0     /* Watchdog timer ICR */
+#define        MCFSIM_TIMER1ICR        MCFSIM_ICR1     /* Timer 1 ICR */
+#define        MCFSIM_TIMER2ICR        MCFSIM_ICR2     /* Timer 2 ICR */
+#define        MCFSIM_UART1ICR         MCFSIM_ICR4     /* UART 1 ICR */
+#define        MCFSIM_UART2ICR         MCFSIM_ICR5     /* UART 2 ICR */
+#define        MCFSIM_DMA0ICR          MCFSIM_ICR6     /* DMA 0 ICR */
+#define        MCFSIM_DMA1ICR          MCFSIM_ICR7     /* DMA 1 ICR */
+#define        MCFSIM_DMA2ICR          MCFSIM_ICR8     /* DMA 2 ICR */
+#define        MCFSIM_DMA3ICR          MCFSIM_ICR9     /* DMA 3 ICR */
+
+
+#define        MCFINTC0_SIMR           0xFC04801C
+#define        MCFINTC0_CIMR           0xFC04801D
+#define        MCFINTC0_ICR0           0xFC048040
+#define        MCFINTC1_SIMR           0xFC04C01C
+#define        MCFINTC1_CIMR           0xFC04C01D
+#define        MCFINTC1_ICR0           0xFC04C040
+#define MCFINTC2_SIMR          (0)
+#define MCFINTC2_CIMR          (0)
+#define MCFINTC2_ICR0          (0)
+
+#define MCFSIM_ICR_TIMER1      (0xFC048040+32)
+#define MCFSIM_ICR_TIMER2      (0xFC048040+33)
+
+/*
+ *     Define system peripheral IRQ usage.
+ */
+#define        MCF_IRQ_TIMER           (64 + 32)       /* Timer0 */
+#define        MCF_IRQ_PROFILER        (64 + 33)       /* Timer1 */
+
+/*
+ *  UART module.
+ */
+#define MCFUART_BASE0          0xFC060000      /* Base address of UART1 */
+#define MCFUART_BASE1          0xFC064000      /* Base address of UART2 */
+#define MCFUART_BASE2          0xFC068000      /* Base address of UART3 */
+
+/*
+ *  FEC module.
+ */
+#define        MCFFEC_BASE0            0xFC030000      /* Base address of FEC0 */
+#define        MCFFEC_SIZE0            0x800           /* Size of FEC0 region */
+
+/*
+ *  QSPI module.
+ */
+#define        MCFQSPI_BASE            0xFC05C000      /* Base address of QSPI */
+#define        MCFQSPI_SIZE            0x40            /* Size of QSPI region */
+
+#define        MCFQSPI_CS0             84
+#define        MCFQSPI_CS1             85
+#define        MCFQSPI_CS2             86
+
+/*
+ *  Timer module.
+ */
+#define MCFTIMER_BASE1         0xFC070000      /* Base address of TIMER1 */
+#define MCFTIMER_BASE2         0xFC074000      /* Base address of TIMER2 */
+#define MCFTIMER_BASE3         0xFC078000      /* Base address of TIMER3 */
+#define MCFTIMER_BASE4         0xFC07C000      /* Base address of TIMER4 */
+
+/*********************************************************************
+ *
+ * Reset Controller Module
+ *
+ *********************************************************************/
+
+#define        MCF_RCR                 0xFC0A0000
+#define        MCF_RSR                 0xFC0A0001
+
+#define        MCF_RCR_SWRESET         0x80            /* Software reset bit */
+#define        MCF_RCR_FRCSTOUT        0x40            /* Force external reset */
+
+
+/*
+ * Power Management
+ */
+#define MCFPM_WCR              0xfc040013
+#define MCFPM_PPMSR0           0xfc04002c
+#define MCFPM_PPMCR0           0xfc04002d
+#define MCFPM_PPMSR1           0xfc04002e
+#define MCFPM_PPMCR1           0xfc04002f
+#define MCFPM_PPMHR0           0xfc040030
+#define MCFPM_PPMLR0           0xfc040034
+#define MCFPM_PPMHR1           0xfc040038
+#define MCFPM_LPCR             0xec090007
+
+/*
+ *     The M5329EVB board needs a help getting its devices initialized 
+ *     at kernel start time if dBUG doesn't set it up (for example 
+ *     it is not used), so we need to do it manually.
+ */
+#ifdef __ASSEMBLER__
+.macro m5329EVB_setup
+       movel   #0xFC098000, %a7
+       movel   #0x0, (%a7)
+#define CORE_SRAM      0x80000000      
+#define CORE_SRAM_SIZE 0x8000
+       movel   #CORE_SRAM, %d0
+       addl    #0x221, %d0
+       movec   %d0,%RAMBAR1
+       movel   #CORE_SRAM, %sp
+       addl    #CORE_SRAM_SIZE, %sp
+       jsr     sysinit
+.endm
+#define        PLATFORM_SETUP  m5329EVB_setup
+
+#endif /* __ASSEMBLER__ */
+
+/*********************************************************************
+ *
+ * Chip Configuration Module (CCM)
+ *
+ *********************************************************************/
+
+/* Register read/write macros */
+#define MCF_CCM_CCR               0xFC0A0004
+#define MCF_CCM_RCON              0xFC0A0008
+#define MCF_CCM_CIR               0xFC0A000A
+#define MCF_CCM_MISCCR            0xFC0A0010
+#define MCF_CCM_CDR               0xFC0A0012
+#define MCF_CCM_UHCSR             0xFC0A0014
+#define MCF_CCM_UOCSR             0xFC0A0016
+
+/* Bit definitions and macros for MCF_CCM_CCR */
+#define MCF_CCM_CCR_RESERVED      (0x0001)
+#define MCF_CCM_CCR_PLL_MODE      (0x0003)
+#define MCF_CCM_CCR_OSC_MODE      (0x0005)
+#define MCF_CCM_CCR_BOOTPS(x)     (((x)&0x0003)<<3|0x0001)
+#define MCF_CCM_CCR_LOAD          (0x0021)
+#define MCF_CCM_CCR_LIMP          (0x0041)
+#define MCF_CCM_CCR_CSC(x)        (((x)&0x0003)<<8|0x0001)
+
+/* Bit definitions and macros for MCF_CCM_RCON */
+#define MCF_CCM_RCON_RESERVED     (0x0001)
+#define MCF_CCM_RCON_PLL_MODE     (0x0003)
+#define MCF_CCM_RCON_OSC_MODE     (0x0005)
+#define MCF_CCM_RCON_BOOTPS(x)    (((x)&0x0003)<<3|0x0001)
+#define MCF_CCM_RCON_LOAD         (0x0021)
+#define MCF_CCM_RCON_LIMP         (0x0041)
+#define MCF_CCM_RCON_CSC(x)       (((x)&0x0003)<<8|0x0001)
+
+/* Bit definitions and macros for MCF_CCM_CIR */
+#define MCF_CCM_CIR_PRN(x)        (((x)&0x003F)<<0)
+#define MCF_CCM_CIR_PIN(x)        (((x)&0x03FF)<<6)
+
+/* Bit definitions and macros for MCF_CCM_MISCCR */
+#define MCF_CCM_MISCCR_USBSRC     (0x0001)
+#define MCF_CCM_MISCCR_USBDIV     (0x0002)
+#define MCF_CCM_MISCCR_SSI_SRC    (0x0010)
+#define MCF_CCM_MISCCR_TIM_DMA   (0x0020)
+#define MCF_CCM_MISCCR_SSI_PUS    (0x0040)
+#define MCF_CCM_MISCCR_SSI_PUE    (0x0080)
+#define MCF_CCM_MISCCR_LCD_CHEN   (0x0100)
+#define MCF_CCM_MISCCR_LIMP       (0x1000)
+#define MCF_CCM_MISCCR_PLL_LOCK   (0x2000)
+
+/* Bit definitions and macros for MCF_CCM_CDR */
+#define MCF_CCM_CDR_SSIDIV(x)     (((x)&0x000F)<<0)
+#define MCF_CCM_CDR_LPDIV(x)      (((x)&0x000F)<<8)
+
+/* Bit definitions and macros for MCF_CCM_UHCSR */
+#define MCF_CCM_UHCSR_XPDE        (0x0001)
+#define MCF_CCM_UHCSR_UHMIE       (0x0002)
+#define MCF_CCM_UHCSR_WKUP        (0x0004)
+#define MCF_CCM_UHCSR_PORTIND(x)  (((x)&0x0003)<<14)
+
+/* Bit definitions and macros for MCF_CCM_UOCSR */
+#define MCF_CCM_UOCSR_XPDE        (0x0001)
+#define MCF_CCM_UOCSR_UOMIE       (0x0002)
+#define MCF_CCM_UOCSR_WKUP        (0x0004)
+#define MCF_CCM_UOCSR_PWRFLT      (0x0008)
+#define MCF_CCM_UOCSR_SEND        (0x0010)
+#define MCF_CCM_UOCSR_VVLD        (0x0020)
+#define MCF_CCM_UOCSR_BVLD        (0x0040)
+#define MCF_CCM_UOCSR_AVLD        (0x0080)
+#define MCF_CCM_UOCSR_DPPU        (0x0100)
+#define MCF_CCM_UOCSR_DCR_VBUS    (0x0200)
+#define MCF_CCM_UOCSR_CRG_VBUS    (0x0400)
+#define MCF_CCM_UOCSR_DRV_VBUS    (0x0800)
+#define MCF_CCM_UOCSR_DMPD        (0x1000)
+#define MCF_CCM_UOCSR_DPPD        (0x2000)
+#define MCF_CCM_UOCSR_PORTIND(x)  (((x)&0x0003)<<14)
+
+/*********************************************************************
+ *
+ * FlexBus Chip Selects (FBCS)
+ *
+ *********************************************************************/
+
+/* Register read/write macros */
+#define MCF_FBCS0_CSAR         0xFC008000
+#define MCF_FBCS0_CSMR         0xFC008004
+#define MCF_FBCS0_CSCR         0xFC008008
+#define MCF_FBCS1_CSAR         0xFC00800C
+#define MCF_FBCS1_CSMR         0xFC008010
+#define MCF_FBCS1_CSCR         0xFC008014
+#define MCF_FBCS2_CSAR         0xFC008018
+#define MCF_FBCS2_CSMR         0xFC00801C
+#define MCF_FBCS2_CSCR         0xFC008020
+#define MCF_FBCS3_CSAR         0xFC008024
+#define MCF_FBCS3_CSMR         0xFC008028
+#define MCF_FBCS3_CSCR         0xFC00802C
+#define MCF_FBCS4_CSAR         0xFC008030
+#define MCF_FBCS4_CSMR         0xFC008034
+#define MCF_FBCS4_CSCR         0xFC008038
+#define MCF_FBCS5_CSAR         0xFC00803C
+#define MCF_FBCS5_CSMR         0xFC008040
+#define MCF_FBCS5_CSCR         0xFC008044
+
+/* Bit definitions and macros for MCF_FBCS_CSAR */
+#define MCF_FBCS_CSAR_BA(x)    ((x)&0xFFFF0000)
+
+/* Bit definitions and macros for MCF_FBCS_CSMR */
+#define MCF_FBCS_CSMR_V                (0x00000001)
+#define MCF_FBCS_CSMR_WP       (0x00000100)
+#define MCF_FBCS_CSMR_BAM(x)   (((x)&0x0000FFFF)<<16)
+#define MCF_FBCS_CSMR_BAM_4G   (0xFFFF0000)
+#define MCF_FBCS_CSMR_BAM_2G   (0x7FFF0000)
+#define MCF_FBCS_CSMR_BAM_1G   (0x3FFF0000)
+#define MCF_FBCS_CSMR_BAM_1024M        (0x3FFF0000)
+#define MCF_FBCS_CSMR_BAM_512M (0x1FFF0000)
+#define MCF_FBCS_CSMR_BAM_256M (0x0FFF0000)
+#define MCF_FBCS_CSMR_BAM_128M (0x07FF0000)
+#define MCF_FBCS_CSMR_BAM_64M  (0x03FF0000)
+#define MCF_FBCS_CSMR_BAM_32M  (0x01FF0000)
+#define MCF_FBCS_CSMR_BAM_16M  (0x00FF0000)
+#define MCF_FBCS_CSMR_BAM_8M   (0x007F0000)
+#define MCF_FBCS_CSMR_BAM_4M   (0x003F0000)
+#define MCF_FBCS_CSMR_BAM_2M   (0x001F0000)
+#define MCF_FBCS_CSMR_BAM_1M   (0x000F0000)
+#define MCF_FBCS_CSMR_BAM_1024K        (0x000F0000)
+#define MCF_FBCS_CSMR_BAM_512K (0x00070000)
+#define MCF_FBCS_CSMR_BAM_256K (0x00030000)
+#define MCF_FBCS_CSMR_BAM_128K (0x00010000)
+#define MCF_FBCS_CSMR_BAM_64K  (0x00000000)
+
+/* Bit definitions and macros for MCF_FBCS_CSCR */
+#define MCF_FBCS_CSCR_BSTW     (0x00000008)
+#define MCF_FBCS_CSCR_BSTR     (0x00000010)
+#define MCF_FBCS_CSCR_BEM      (0x00000020)
+#define MCF_FBCS_CSCR_PS(x)    (((x)&0x00000003)<<6)
+#define MCF_FBCS_CSCR_AA       (0x00000100)
+#define MCF_FBCS_CSCR_SBM      (0x00000200)
+#define MCF_FBCS_CSCR_WS(x)    (((x)&0x0000003F)<<10)
+#define MCF_FBCS_CSCR_WRAH(x)  (((x)&0x00000003)<<16)
+#define MCF_FBCS_CSCR_RDAH(x)  (((x)&0x00000003)<<18)
+#define MCF_FBCS_CSCR_ASET(x)  (((x)&0x00000003)<<20)
+#define MCF_FBCS_CSCR_SWSEN    (0x00800000)
+#define MCF_FBCS_CSCR_SWS(x)   (((x)&0x0000003F)<<26)
+#define MCF_FBCS_CSCR_PS_8     (0x0040)
+#define MCF_FBCS_CSCR_PS_16    (0x0080)
+#define MCF_FBCS_CSCR_PS_32    (0x0000)
+
+/*********************************************************************
+ *
+ * General Purpose I/O (GPIO)
+ *
+ *********************************************************************/
+
+/* Register read/write macros */
+#define MCFGPIO_PODR_FECH              (0xFC0A4000)
+#define MCFGPIO_PODR_FECL              (0xFC0A4001)
+#define MCFGPIO_PODR_SSI               (0xFC0A4002)
+#define MCFGPIO_PODR_BUSCTL            (0xFC0A4003)
+#define MCFGPIO_PODR_BE                        (0xFC0A4004)
+#define MCFGPIO_PODR_CS                        (0xFC0A4005)
+#define MCFGPIO_PODR_PWM               (0xFC0A4006)
+#define MCFGPIO_PODR_FECI2C            (0xFC0A4007)
+#define MCFGPIO_PODR_UART              (0xFC0A4009)
+#define MCFGPIO_PODR_QSPI              (0xFC0A400A)
+#define MCFGPIO_PODR_TIMER             (0xFC0A400B)
+#define MCFGPIO_PODR_LCDDATAH          (0xFC0A400D)
+#define MCFGPIO_PODR_LCDDATAM          (0xFC0A400E)
+#define MCFGPIO_PODR_LCDDATAL          (0xFC0A400F)
+#define MCFGPIO_PODR_LCDCTLH           (0xFC0A4010)
+#define MCFGPIO_PODR_LCDCTLL           (0xFC0A4011)
+#define MCFGPIO_PDDR_FECH              (0xFC0A4014)
+#define MCFGPIO_PDDR_FECL              (0xFC0A4015)
+#define MCFGPIO_PDDR_SSI               (0xFC0A4016)
+#define MCFGPIO_PDDR_BUSCTL            (0xFC0A4017)
+#define MCFGPIO_PDDR_BE                        (0xFC0A4018)
+#define MCFGPIO_PDDR_CS                        (0xFC0A4019)
+#define MCFGPIO_PDDR_PWM               (0xFC0A401A)
+#define MCFGPIO_PDDR_FECI2C            (0xFC0A401B)
+#define MCFGPIO_PDDR_UART              (0xFC0A401C)
+#define MCFGPIO_PDDR_QSPI              (0xFC0A401E)
+#define MCFGPIO_PDDR_TIMER             (0xFC0A401F)
+#define MCFGPIO_PDDR_LCDDATAH          (0xFC0A4021)
+#define MCFGPIO_PDDR_LCDDATAM          (0xFC0A4022)
+#define MCFGPIO_PDDR_LCDDATAL          (0xFC0A4023)
+#define MCFGPIO_PDDR_LCDCTLH           (0xFC0A4024)
+#define MCFGPIO_PDDR_LCDCTLL           (0xFC0A4025)
+#define MCFGPIO_PPDSDR_FECH            (0xFC0A4028)
+#define MCFGPIO_PPDSDR_FECL            (0xFC0A4029)
+#define MCFGPIO_PPDSDR_SSI             (0xFC0A402A)
+#define MCFGPIO_PPDSDR_BUSCTL          (0xFC0A402B)
+#define MCFGPIO_PPDSDR_BE              (0xFC0A402C)
+#define MCFGPIO_PPDSDR_CS              (0xFC0A402D)
+#define MCFGPIO_PPDSDR_PWM             (0xFC0A402E)
+#define MCFGPIO_PPDSDR_FECI2C          (0xFC0A402F)
+#define MCFGPIO_PPDSDR_UART            (0xFC0A4031)
+#define MCFGPIO_PPDSDR_QSPI            (0xFC0A4032)
+#define MCFGPIO_PPDSDR_TIMER           (0xFC0A4033)
+#define MCFGPIO_PPDSDR_LCDDATAH                (0xFC0A4035)
+#define MCFGPIO_PPDSDR_LCDDATAM                (0xFC0A4036)
+#define MCFGPIO_PPDSDR_LCDDATAL                (0xFC0A4037)
+#define MCFGPIO_PPDSDR_LCDCTLH         (0xFC0A4038)
+#define MCFGPIO_PPDSDR_LCDCTLL         (0xFC0A4039)
+#define MCFGPIO_PCLRR_FECH             (0xFC0A403C)
+#define MCFGPIO_PCLRR_FECL             (0xFC0A403D)
+#define MCFGPIO_PCLRR_SSI              (0xFC0A403E)
+#define MCFGPIO_PCLRR_BUSCTL           (0xFC0A403F)
+#define MCFGPIO_PCLRR_BE               (0xFC0A4040)
+#define MCFGPIO_PCLRR_CS               (0xFC0A4041)
+#define MCFGPIO_PCLRR_PWM              (0xFC0A4042)
+#define MCFGPIO_PCLRR_FECI2C           (0xFC0A4043)
+#define MCFGPIO_PCLRR_UART             (0xFC0A4045)
+#define MCFGPIO_PCLRR_QSPI             (0xFC0A4046)
+#define MCFGPIO_PCLRR_TIMER            (0xFC0A4047)
+#define MCFGPIO_PCLRR_LCDDATAH         (0xFC0A4049)
+#define MCFGPIO_PCLRR_LCDDATAM         (0xFC0A404A)
+#define MCFGPIO_PCLRR_LCDDATAL         (0xFC0A404B)
+#define MCFGPIO_PCLRR_LCDCTLH          (0xFC0A404C)
+#define MCFGPIO_PCLRR_LCDCTLL          (0xFC0A404D)
+#define MCFGPIO_PAR_FEC                        (0xFC0A4050)
+#define MCFGPIO_PAR_PWM                        (0xFC0A4051)
+#define MCFGPIO_PAR_BUSCTL             (0xFC0A4052)
+#define MCFGPIO_PAR_FECI2C             (0xFC0A4053)
+#define MCFGPIO_PAR_BE                 (0xFC0A4054)
+#define MCFGPIO_PAR_CS                 (0xFC0A4055)
+#define MCFGPIO_PAR_SSI                        (0xFC0A4056)
+#define MCFGPIO_PAR_UART               (0xFC0A4058)
+#define MCFGPIO_PAR_QSPI               (0xFC0A405A)
+#define MCFGPIO_PAR_TIMER              (0xFC0A405C)
+#define MCFGPIO_PAR_LCDDATA            (0xFC0A405D)
+#define MCFGPIO_PAR_LCDCTL             (0xFC0A405E)
+#define MCFGPIO_PAR_IRQ                        (0xFC0A4060)
+#define MCFGPIO_MSCR_FLEXBUS           (0xFC0A4064)
+#define MCFGPIO_MSCR_SDRAM             (0xFC0A4065)
+#define MCFGPIO_DSCR_I2C               (0xFC0A4068)
+#define MCFGPIO_DSCR_PWM               (0xFC0A4069)
+#define MCFGPIO_DSCR_FEC               (0xFC0A406A)
+#define MCFGPIO_DSCR_UART              (0xFC0A406B)
+#define MCFGPIO_DSCR_QSPI              (0xFC0A406C)
+#define MCFGPIO_DSCR_TIMER             (0xFC0A406D)
+#define MCFGPIO_DSCR_SSI               (0xFC0A406E)
+#define MCFGPIO_DSCR_LCD               (0xFC0A406F)
+#define MCFGPIO_DSCR_DEBUG             (0xFC0A4070)
+#define MCFGPIO_DSCR_CLKRST            (0xFC0A4071)
+#define MCFGPIO_DSCR_IRQ               (0xFC0A4072)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_FECH */
+#define MCF_GPIO_PODR_FECH_PODR_FECH0              (0x01)
+#define MCF_GPIO_PODR_FECH_PODR_FECH1              (0x02)
+#define MCF_GPIO_PODR_FECH_PODR_FECH2              (0x04)
+#define MCF_GPIO_PODR_FECH_PODR_FECH3              (0x08)
+#define MCF_GPIO_PODR_FECH_PODR_FECH4              (0x10)
+#define MCF_GPIO_PODR_FECH_PODR_FECH5              (0x20)
+#define MCF_GPIO_PODR_FECH_PODR_FECH6              (0x40)
+#define MCF_GPIO_PODR_FECH_PODR_FECH7              (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_FECL */
+#define MCF_GPIO_PODR_FECL_PODR_FECL0              (0x01)
+#define MCF_GPIO_PODR_FECL_PODR_FECL1              (0x02)
+#define MCF_GPIO_PODR_FECL_PODR_FECL2              (0x04)
+#define MCF_GPIO_PODR_FECL_PODR_FECL3              (0x08)
+#define MCF_GPIO_PODR_FECL_PODR_FECL4              (0x10)
+#define MCF_GPIO_PODR_FECL_PODR_FECL5              (0x20)
+#define MCF_GPIO_PODR_FECL_PODR_FECL6              (0x40)
+#define MCF_GPIO_PODR_FECL_PODR_FECL7              (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_SSI */
+#define MCF_GPIO_PODR_SSI_PODR_SSI0                (0x01)
+#define MCF_GPIO_PODR_SSI_PODR_SSI1                (0x02)
+#define MCF_GPIO_PODR_SSI_PODR_SSI2                (0x04)
+#define MCF_GPIO_PODR_SSI_PODR_SSI3                (0x08)
+#define MCF_GPIO_PODR_SSI_PODR_SSI4                (0x10)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_BUSCTL */
+#define MCF_GPIO_PODR_BUSCTL_POSDR_BUSCTL0         (0x01)
+#define MCF_GPIO_PODR_BUSCTL_PODR_BUSCTL1          (0x02)
+#define MCF_GPIO_PODR_BUSCTL_PODR_BUSCTL2          (0x04)
+#define MCF_GPIO_PODR_BUSCTL_PODR_BUSCTL3          (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_BE */
+#define MCF_GPIO_PODR_BE_PODR_BE0                  (0x01)
+#define MCF_GPIO_PODR_BE_PODR_BE1                  (0x02)
+#define MCF_GPIO_PODR_BE_PODR_BE2                  (0x04)
+#define MCF_GPIO_PODR_BE_PODR_BE3                  (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_CS */
+#define MCF_GPIO_PODR_CS_PODR_CS1                  (0x02)
+#define MCF_GPIO_PODR_CS_PODR_CS2                  (0x04)
+#define MCF_GPIO_PODR_CS_PODR_CS3                  (0x08)
+#define MCF_GPIO_PODR_CS_PODR_CS4                  (0x10)
+#define MCF_GPIO_PODR_CS_PODR_CS5                  (0x20)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_PWM */
+#define MCF_GPIO_PODR_PWM_PODR_PWM2                (0x04)
+#define MCF_GPIO_PODR_PWM_PODR_PWM3                (0x08)
+#define MCF_GPIO_PODR_PWM_PODR_PWM4                (0x10)
+#define MCF_GPIO_PODR_PWM_PODR_PWM5                (0x20)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_FECI2C */
+#define MCF_GPIO_PODR_FECI2C_PODR_FECI2C0          (0x01)
+#define MCF_GPIO_PODR_FECI2C_PODR_FECI2C1          (0x02)
+#define MCF_GPIO_PODR_FECI2C_PODR_FECI2C2          (0x04)
+#define MCF_GPIO_PODR_FECI2C_PODR_FECI2C3          (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_UART */
+#define MCF_GPIO_PODR_UART_PODR_UART0              (0x01)
+#define MCF_GPIO_PODR_UART_PODR_UART1              (0x02)
+#define MCF_GPIO_PODR_UART_PODR_UART2              (0x04)
+#define MCF_GPIO_PODR_UART_PODR_UART3              (0x08)
+#define MCF_GPIO_PODR_UART_PODR_UART4              (0x10)
+#define MCF_GPIO_PODR_UART_PODR_UART5              (0x20)
+#define MCF_GPIO_PODR_UART_PODR_UART6              (0x40)
+#define MCF_GPIO_PODR_UART_PODR_UART7              (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_QSPI */
+#define MCF_GPIO_PODR_QSPI_PODR_QSPI0              (0x01)
+#define MCF_GPIO_PODR_QSPI_PODR_QSPI1              (0x02)
+#define MCF_GPIO_PODR_QSPI_PODR_QSPI2              (0x04)
+#define MCF_GPIO_PODR_QSPI_PODR_QSPI3              (0x08)
+#define MCF_GPIO_PODR_QSPI_PODR_QSPI4              (0x10)
+#define MCF_GPIO_PODR_QSPI_PODR_QSPI5              (0x20)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_TIMER */
+#define MCF_GPIO_PODR_TIMER_PODR_TIMER0            (0x01)
+#define MCF_GPIO_PODR_TIMER_PODR_TIMER1            (0x02)
+#define MCF_GPIO_PODR_TIMER_PODR_TIMER2            (0x04)
+#define MCF_GPIO_PODR_TIMER_PODR_TIMER3            (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_LCDDATAH */
+#define MCF_GPIO_PODR_LCDDATAH_PODR_LCDDATAH0      (0x01)
+#define MCF_GPIO_PODR_LCDDATAH_PODR_LCDDATAH1      (0x02)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_LCDDATAM */
+#define MCF_GPIO_PODR_LCDDATAM_PODR_LCDDATAM0      (0x01)
+#define MCF_GPIO_PODR_LCDDATAM_PODR_LCDDATAM1      (0x02)
+#define MCF_GPIO_PODR_LCDDATAM_PODR_LCDDATAM2      (0x04)
+#define MCF_GPIO_PODR_LCDDATAM_PODR_LCDDATAM3      (0x08)
+#define MCF_GPIO_PODR_LCDDATAM_PODR_LCDDATAM4      (0x10)
+#define MCF_GPIO_PODR_LCDDATAM_PODR_LCDDATAM5      (0x20)
+#define MCF_GPIO_PODR_LCDDATAM_PODR_LCDDATAM6      (0x40)
+#define MCF_GPIO_PODR_LCDDATAM_PODR_LCDDATAM7      (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_LCDDATAL */
+#define MCF_GPIO_PODR_LCDDATAL_PODR_LCDDATAL0      (0x01)
+#define MCF_GPIO_PODR_LCDDATAL_PODR_LCDDATAL1      (0x02)
+#define MCF_GPIO_PODR_LCDDATAL_PODR_LCDDATAL2      (0x04)
+#define MCF_GPIO_PODR_LCDDATAL_PODR_LCDDATAL3      (0x08)
+#define MCF_GPIO_PODR_LCDDATAL_PODR_LCDDATAL4      (0x10)
+#define MCF_GPIO_PODR_LCDDATAL_PODR_LCDDATAL5      (0x20)
+#define MCF_GPIO_PODR_LCDDATAL_PODR_LCDDATAL6      (0x40)
+#define MCF_GPIO_PODR_LCDDATAL_PODR_LCDDATAL7      (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_LCDCTLH */
+#define MCF_GPIO_PODR_LCDCTLH_PODR_LCDCTLH0        (0x01)
+
+/* Bit definitions and macros for MCF_GPIO_PODR_LCDCTLL */
+#define MCF_GPIO_PODR_LCDCTLL_PODR_LCDCTLL0        (0x01)
+#define MCF_GPIO_PODR_LCDCTLL_PODR_LCDCTLL1        (0x02)
+#define MCF_GPIO_PODR_LCDCTLL_PODR_LCDCTLL2        (0x04)
+#define MCF_GPIO_PODR_LCDCTLL_PODR_LCDCTLL3        (0x08)
+#define MCF_GPIO_PODR_LCDCTLL_PODR_LCDCTLL4        (0x10)
+#define MCF_GPIO_PODR_LCDCTLL_PODR_LCDCTLL5        (0x20)
+#define MCF_GPIO_PODR_LCDCTLL_PODR_LCDCTLL6        (0x40)
+#define MCF_GPIO_PODR_LCDCTLL_PODR_LCDCTLL7        (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_FECH */
+#define MCF_GPIO_PDDR_FECH_PDDR_FECH0              (0x01)
+#define MCF_GPIO_PDDR_FECH_PDDR_FECH1              (0x02)
+#define MCF_GPIO_PDDR_FECH_PDDR_FECH2              (0x04)
+#define MCF_GPIO_PDDR_FECH_PDDR_FECH3              (0x08)
+#define MCF_GPIO_PDDR_FECH_PDDR_FECH4              (0x10)
+#define MCF_GPIO_PDDR_FECH_PDDR_FECH5              (0x20)
+#define MCF_GPIO_PDDR_FECH_PDDR_FECH6              (0x40)
+#define MCF_GPIO_PDDR_FECH_PDDR_FECH7              (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_FECL */
+#define MCF_GPIO_PDDR_FECL_PDDR_FECL0              (0x01)
+#define MCF_GPIO_PDDR_FECL_PDDR_FECL1              (0x02)
+#define MCF_GPIO_PDDR_FECL_PDDR_FECL2              (0x04)
+#define MCF_GPIO_PDDR_FECL_PDDR_FECL3              (0x08)
+#define MCF_GPIO_PDDR_FECL_PDDR_FECL4              (0x10)
+#define MCF_GPIO_PDDR_FECL_PDDR_FECL5              (0x20)
+#define MCF_GPIO_PDDR_FECL_PDDR_FECL6              (0x40)
+#define MCF_GPIO_PDDR_FECL_PDDR_FECL7              (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_SSI */
+#define MCF_GPIO_PDDR_SSI_PDDR_SSI0                (0x01)
+#define MCF_GPIO_PDDR_SSI_PDDR_SSI1                (0x02)
+#define MCF_GPIO_PDDR_SSI_PDDR_SSI2                (0x04)
+#define MCF_GPIO_PDDR_SSI_PDDR_SSI3                (0x08)
+#define MCF_GPIO_PDDR_SSI_PDDR_SSI4                (0x10)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_BUSCTL */
+#define MCF_GPIO_PDDR_BUSCTL_POSDR_BUSCTL0         (0x01)
+#define MCF_GPIO_PDDR_BUSCTL_PDDR_BUSCTL1          (0x02)
+#define MCF_GPIO_PDDR_BUSCTL_PDDR_BUSCTL2          (0x04)
+#define MCF_GPIO_PDDR_BUSCTL_PDDR_BUSCTL3          (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_BE */
+#define MCF_GPIO_PDDR_BE_PDDR_BE0                  (0x01)
+#define MCF_GPIO_PDDR_BE_PDDR_BE1                  (0x02)
+#define MCF_GPIO_PDDR_BE_PDDR_BE2                  (0x04)
+#define MCF_GPIO_PDDR_BE_PDDR_BE3                  (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_CS */
+#define MCF_GPIO_PDDR_CS_PDDR_CS1                  (0x02)
+#define MCF_GPIO_PDDR_CS_PDDR_CS2                  (0x04)
+#define MCF_GPIO_PDDR_CS_PDDR_CS3                  (0x08)
+#define MCF_GPIO_PDDR_CS_PDDR_CS4                  (0x10)
+#define MCF_GPIO_PDDR_CS_PDDR_CS5                  (0x20)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_PWM */
+#define MCF_GPIO_PDDR_PWM_PDDR_PWM2                (0x04)
+#define MCF_GPIO_PDDR_PWM_PDDR_PWM3                (0x08)
+#define MCF_GPIO_PDDR_PWM_PDDR_PWM4                (0x10)
+#define MCF_GPIO_PDDR_PWM_PDDR_PWM5                (0x20)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_FECI2C */
+#define MCF_GPIO_PDDR_FECI2C_PDDR_FECI2C0          (0x01)
+#define MCF_GPIO_PDDR_FECI2C_PDDR_FECI2C1          (0x02)
+#define MCF_GPIO_PDDR_FECI2C_PDDR_FECI2C2          (0x04)
+#define MCF_GPIO_PDDR_FECI2C_PDDR_FECI2C3          (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_UART */
+#define MCF_GPIO_PDDR_UART_PDDR_UART0              (0x01)
+#define MCF_GPIO_PDDR_UART_PDDR_UART1              (0x02)
+#define MCF_GPIO_PDDR_UART_PDDR_UART2              (0x04)
+#define MCF_GPIO_PDDR_UART_PDDR_UART3              (0x08)
+#define MCF_GPIO_PDDR_UART_PDDR_UART4              (0x10)
+#define MCF_GPIO_PDDR_UART_PDDR_UART5              (0x20)
+#define MCF_GPIO_PDDR_UART_PDDR_UART6              (0x40)
+#define MCF_GPIO_PDDR_UART_PDDR_UART7              (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_QSPI */
+#define MCF_GPIO_PDDR_QSPI_PDDR_QSPI0              (0x01)
+#define MCF_GPIO_PDDR_QSPI_PDDR_QSPI1              (0x02)
+#define MCF_GPIO_PDDR_QSPI_PDDR_QSPI2              (0x04)
+#define MCF_GPIO_PDDR_QSPI_PDDR_QSPI3              (0x08)
+#define MCF_GPIO_PDDR_QSPI_PDDR_QSPI4              (0x10)
+#define MCF_GPIO_PDDR_QSPI_PDDR_QSPI5              (0x20)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_TIMER */
+#define MCF_GPIO_PDDR_TIMER_PDDR_TIMER0            (0x01)
+#define MCF_GPIO_PDDR_TIMER_PDDR_TIMER1            (0x02)
+#define MCF_GPIO_PDDR_TIMER_PDDR_TIMER2            (0x04)
+#define MCF_GPIO_PDDR_TIMER_PDDR_TIMER3            (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_LCDDATAH */
+#define MCF_GPIO_PDDR_LCDDATAH_PDDR_LCDDATAH0      (0x01)
+#define MCF_GPIO_PDDR_LCDDATAH_PDDR_LCDDATAH1      (0x02)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_LCDDATAM */
+#define MCF_GPIO_PDDR_LCDDATAM_PDDR_LCDDATAM0      (0x01)
+#define MCF_GPIO_PDDR_LCDDATAM_PDDR_LCDDATAM1      (0x02)
+#define MCF_GPIO_PDDR_LCDDATAM_PDDR_LCDDATAM2      (0x04)
+#define MCF_GPIO_PDDR_LCDDATAM_PDDR_LCDDATAM3      (0x08)
+#define MCF_GPIO_PDDR_LCDDATAM_PDDR_LCDDATAM4      (0x10)
+#define MCF_GPIO_PDDR_LCDDATAM_PDDR_LCDDATAM5      (0x20)
+#define MCF_GPIO_PDDR_LCDDATAM_PDDR_LCDDATAM6      (0x40)
+#define MCF_GPIO_PDDR_LCDDATAM_PDDR_LCDDATAM7      (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_LCDDATAL */
+#define MCF_GPIO_PDDR_LCDDATAL_PDDR_LCDDATAL0      (0x01)
+#define MCF_GPIO_PDDR_LCDDATAL_PDDR_LCDDATAL1      (0x02)
+#define MCF_GPIO_PDDR_LCDDATAL_PDDR_LCDDATAL2      (0x04)
+#define MCF_GPIO_PDDR_LCDDATAL_PDDR_LCDDATAL3      (0x08)
+#define MCF_GPIO_PDDR_LCDDATAL_PDDR_LCDDATAL4      (0x10)
+#define MCF_GPIO_PDDR_LCDDATAL_PDDR_LCDDATAL5      (0x20)
+#define MCF_GPIO_PDDR_LCDDATAL_PDDR_LCDDATAL6      (0x40)
+#define MCF_GPIO_PDDR_LCDDATAL_PDDR_LCDDATAL7      (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_LCDCTLH */
+#define MCF_GPIO_PDDR_LCDCTLH_PDDR_LCDCTLH0        (0x01)
+
+/* Bit definitions and macros for MCF_GPIO_PDDR_LCDCTLL */
+#define MCF_GPIO_PDDR_LCDCTLL_PDDR_LCDCTLL0        (0x01)
+#define MCF_GPIO_PDDR_LCDCTLL_PDDR_LCDCTLL1        (0x02)
+#define MCF_GPIO_PDDR_LCDCTLL_PDDR_LCDCTLL2        (0x04)
+#define MCF_GPIO_PDDR_LCDCTLL_PDDR_LCDCTLL3        (0x08)
+#define MCF_GPIO_PDDR_LCDCTLL_PDDR_LCDCTLL4        (0x10)
+#define MCF_GPIO_PDDR_LCDCTLL_PDDR_LCDCTLL5        (0x20)
+#define MCF_GPIO_PDDR_LCDCTLL_PDDR_LCDCTLL6        (0x40)
+#define MCF_GPIO_PDDR_LCDCTLL_PDDR_LCDCTLL7        (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_FECH */
+#define MCF_GPIO_PPDSDR_FECH_PPDSDR_FECH0          (0x01)
+#define MCF_GPIO_PPDSDR_FECH_PPDSDR_FECH1          (0x02)
+#define MCF_GPIO_PPDSDR_FECH_PPDSDR_FECH2          (0x04)
+#define MCF_GPIO_PPDSDR_FECH_PPDSDR_FECH3          (0x08)
+#define MCF_GPIO_PPDSDR_FECH_PPDSDR_FECH4          (0x10)
+#define MCF_GPIO_PPDSDR_FECH_PPDSDR_FECH5          (0x20)
+#define MCF_GPIO_PPDSDR_FECH_PPDSDR_FECH6          (0x40)
+#define MCF_GPIO_PPDSDR_FECH_PPDSDR_FECH7          (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_FECL */
+#define MCF_GPIO_PPDSDR_FECL_PPDSDR_FECL0          (0x01)
+#define MCF_GPIO_PPDSDR_FECL_PPDSDR_FECL1          (0x02)
+#define MCF_GPIO_PPDSDR_FECL_PPDSDR_FECL2          (0x04)
+#define MCF_GPIO_PPDSDR_FECL_PPDSDR_FECL3          (0x08)
+#define MCF_GPIO_PPDSDR_FECL_PPDSDR_FECL4          (0x10)
+#define MCF_GPIO_PPDSDR_FECL_PPDSDR_FECL5          (0x20)
+#define MCF_GPIO_PPDSDR_FECL_PPDSDR_FECL6          (0x40)
+#define MCF_GPIO_PPDSDR_FECL_PPDSDR_FECL7          (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_SSI */
+#define MCF_GPIO_PPDSDR_SSI_PPDSDR_SSI0            (0x01)
+#define MCF_GPIO_PPDSDR_SSI_PPDSDR_SSI1            (0x02)
+#define MCF_GPIO_PPDSDR_SSI_PPDSDR_SSI2            (0x04)
+#define MCF_GPIO_PPDSDR_SSI_PPDSDR_SSI3            (0x08)
+#define MCF_GPIO_PPDSDR_SSI_PPDSDR_SSI4            (0x10)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_BUSCTL */
+#define MCF_GPIO_PPDSDR_BUSCTL_POSDR_BUSCTL0       (0x01)
+#define MCF_GPIO_PPDSDR_BUSCTL_PPDSDR_BUSCTL1      (0x02)
+#define MCF_GPIO_PPDSDR_BUSCTL_PPDSDR_BUSCTL2      (0x04)
+#define MCF_GPIO_PPDSDR_BUSCTL_PPDSDR_BUSCTL3      (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_BE */
+#define MCF_GPIO_PPDSDR_BE_PPDSDR_BE0              (0x01)
+#define MCF_GPIO_PPDSDR_BE_PPDSDR_BE1              (0x02)
+#define MCF_GPIO_PPDSDR_BE_PPDSDR_BE2              (0x04)
+#define MCF_GPIO_PPDSDR_BE_PPDSDR_BE3              (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_CS */
+#define MCF_GPIO_PPDSDR_CS_PPDSDR_CS1              (0x02)
+#define MCF_GPIO_PPDSDR_CS_PPDSDR_CS2              (0x04)
+#define MCF_GPIO_PPDSDR_CS_PPDSDR_CS3              (0x08)
+#define MCF_GPIO_PPDSDR_CS_PPDSDR_CS4              (0x10)
+#define MCF_GPIO_PPDSDR_CS_PPDSDR_CS5              (0x20)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_PWM */
+#define MCF_GPIO_PPDSDR_PWM_PPDSDR_PWM2            (0x04)
+#define MCF_GPIO_PPDSDR_PWM_PPDSDR_PWM3            (0x08)
+#define MCF_GPIO_PPDSDR_PWM_PPDSDR_PWM4            (0x10)
+#define MCF_GPIO_PPDSDR_PWM_PPDSDR_PWM5            (0x20)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_FECI2C */
+#define MCF_GPIO_PPDSDR_FECI2C_PPDSDR_FECI2C0      (0x01)
+#define MCF_GPIO_PPDSDR_FECI2C_PPDSDR_FECI2C1      (0x02)
+#define MCF_GPIO_PPDSDR_FECI2C_PPDSDR_FECI2C2      (0x04)
+#define MCF_GPIO_PPDSDR_FECI2C_PPDSDR_FECI2C3      (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_UART */
+#define MCF_GPIO_PPDSDR_UART_PPDSDR_UART0          (0x01)
+#define MCF_GPIO_PPDSDR_UART_PPDSDR_UART1          (0x02)
+#define MCF_GPIO_PPDSDR_UART_PPDSDR_UART2          (0x04)
+#define MCF_GPIO_PPDSDR_UART_PPDSDR_UART3          (0x08)
+#define MCF_GPIO_PPDSDR_UART_PPDSDR_UART4          (0x10)
+#define MCF_GPIO_PPDSDR_UART_PPDSDR_UART5          (0x20)
+#define MCF_GPIO_PPDSDR_UART_PPDSDR_UART6          (0x40)
+#define MCF_GPIO_PPDSDR_UART_PPDSDR_UART7          (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_QSPI */
+#define MCF_GPIO_PPDSDR_QSPI_PPDSDR_QSPI0          (0x01)
+#define MCF_GPIO_PPDSDR_QSPI_PPDSDR_QSPI1          (0x02)
+#define MCF_GPIO_PPDSDR_QSPI_PPDSDR_QSPI2          (0x04)
+#define MCF_GPIO_PPDSDR_QSPI_PPDSDR_QSPI3          (0x08)
+#define MCF_GPIO_PPDSDR_QSPI_PPDSDR_QSPI4          (0x10)
+#define MCF_GPIO_PPDSDR_QSPI_PPDSDR_QSPI5          (0x20)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_TIMER */
+#define MCF_GPIO_PPDSDR_TIMER_PPDSDR_TIMER0        (0x01)
+#define MCF_GPIO_PPDSDR_TIMER_PPDSDR_TIMER1        (0x02)
+#define MCF_GPIO_PPDSDR_TIMER_PPDSDR_TIMER2        (0x04)
+#define MCF_GPIO_PPDSDR_TIMER_PPDSDR_TIMER3        (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_LCDDATAH */
+#define MCF_GPIO_PPDSDR_LCDDATAH_PPDSDR_LCDDATAH0  (0x01)
+#define MCF_GPIO_PPDSDR_LCDDATAH_PPDSDR_LCDDATAH1  (0x02)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_LCDDATAM */
+#define MCF_GPIO_PPDSDR_LCDDATAM_PPDSDR_LCDDATAM0  (0x01)
+#define MCF_GPIO_PPDSDR_LCDDATAM_PPDSDR_LCDDATAM1  (0x02)
+#define MCF_GPIO_PPDSDR_LCDDATAM_PPDSDR_LCDDATAM2  (0x04)
+#define MCF_GPIO_PPDSDR_LCDDATAM_PPDSDR_LCDDATAM3  (0x08)
+#define MCF_GPIO_PPDSDR_LCDDATAM_PPDSDR_LCDDATAM4  (0x10)
+#define MCF_GPIO_PPDSDR_LCDDATAM_PPDSDR_LCDDATAM5  (0x20)
+#define MCF_GPIO_PPDSDR_LCDDATAM_PPDSDR_LCDDATAM6  (0x40)
+#define MCF_GPIO_PPDSDR_LCDDATAM_PPDSDR_LCDDATAM7  (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_LCDDATAL */
+#define MCF_GPIO_PPDSDR_LCDDATAL_PPDSDR_LCDDATAL0  (0x01)
+#define MCF_GPIO_PPDSDR_LCDDATAL_PPDSDR_LCDDATAL1  (0x02)
+#define MCF_GPIO_PPDSDR_LCDDATAL_PPDSDR_LCDDATAL2  (0x04)
+#define MCF_GPIO_PPDSDR_LCDDATAL_PPDSDR_LCDDATAL3  (0x08)
+#define MCF_GPIO_PPDSDR_LCDDATAL_PPDSDR_LCDDATAL4  (0x10)
+#define MCF_GPIO_PPDSDR_LCDDATAL_PPDSDR_LCDDATAL5  (0x20)
+#define MCF_GPIO_PPDSDR_LCDDATAL_PPDSDR_LCDDATAL6  (0x40)
+#define MCF_GPIO_PPDSDR_LCDDATAL_PPDSDR_LCDDATAL7  (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_LCDCTLH */
+#define MCF_GPIO_PPDSDR_LCDCTLH_PPDSDR_LCDCTLH0    (0x01)
+
+/* Bit definitions and macros for MCF_GPIO_PPDSDR_LCDCTLL */
+#define MCF_GPIO_PPDSDR_LCDCTLL_PPDSDR_LCDCTLL0    (0x01)
+#define MCF_GPIO_PPDSDR_LCDCTLL_PPDSDR_LCDCTLL1    (0x02)
+#define MCF_GPIO_PPDSDR_LCDCTLL_PPDSDR_LCDCTLL2    (0x04)
+#define MCF_GPIO_PPDSDR_LCDCTLL_PPDSDR_LCDCTLL3    (0x08)
+#define MCF_GPIO_PPDSDR_LCDCTLL_PPDSDR_LCDCTLL4    (0x10)
+#define MCF_GPIO_PPDSDR_LCDCTLL_PPDSDR_LCDCTLL5    (0x20)
+#define MCF_GPIO_PPDSDR_LCDCTLL_PPDSDR_LCDCTLL6    (0x40)
+#define MCF_GPIO_PPDSDR_LCDCTLL_PPDSDR_LCDCTLL7    (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_FECH */
+#define MCF_GPIO_PCLRR_FECH_PCLRR_FECH0            (0x01)
+#define MCF_GPIO_PCLRR_FECH_PCLRR_FECH1            (0x02)
+#define MCF_GPIO_PCLRR_FECH_PCLRR_FECH2            (0x04)
+#define MCF_GPIO_PCLRR_FECH_PCLRR_FECH3            (0x08)
+#define MCF_GPIO_PCLRR_FECH_PCLRR_FECH4            (0x10)
+#define MCF_GPIO_PCLRR_FECH_PCLRR_FECH5            (0x20)
+#define MCF_GPIO_PCLRR_FECH_PCLRR_FECH6            (0x40)
+#define MCF_GPIO_PCLRR_FECH_PCLRR_FECH7            (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_FECL */
+#define MCF_GPIO_PCLRR_FECL_PCLRR_FECL0            (0x01)
+#define MCF_GPIO_PCLRR_FECL_PCLRR_FECL1            (0x02)
+#define MCF_GPIO_PCLRR_FECL_PCLRR_FECL2            (0x04)
+#define MCF_GPIO_PCLRR_FECL_PCLRR_FECL3            (0x08)
+#define MCF_GPIO_PCLRR_FECL_PCLRR_FECL4            (0x10)
+#define MCF_GPIO_PCLRR_FECL_PCLRR_FECL5            (0x20)
+#define MCF_GPIO_PCLRR_FECL_PCLRR_FECL6            (0x40)
+#define MCF_GPIO_PCLRR_FECL_PCLRR_FECL7            (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_SSI */
+#define MCF_GPIO_PCLRR_SSI_PCLRR_SSI0              (0x01)
+#define MCF_GPIO_PCLRR_SSI_PCLRR_SSI1              (0x02)
+#define MCF_GPIO_PCLRR_SSI_PCLRR_SSI2              (0x04)
+#define MCF_GPIO_PCLRR_SSI_PCLRR_SSI3              (0x08)
+#define MCF_GPIO_PCLRR_SSI_PCLRR_SSI4              (0x10)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_BUSCTL */
+#define MCF_GPIO_PCLRR_BUSCTL_POSDR_BUSCTL0        (0x01)
+#define MCF_GPIO_PCLRR_BUSCTL_PCLRR_BUSCTL1        (0x02)
+#define MCF_GPIO_PCLRR_BUSCTL_PCLRR_BUSCTL2        (0x04)
+#define MCF_GPIO_PCLRR_BUSCTL_PCLRR_BUSCTL3        (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_BE */
+#define MCF_GPIO_PCLRR_BE_PCLRR_BE0                (0x01)
+#define MCF_GPIO_PCLRR_BE_PCLRR_BE1                (0x02)
+#define MCF_GPIO_PCLRR_BE_PCLRR_BE2                (0x04)
+#define MCF_GPIO_PCLRR_BE_PCLRR_BE3                (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_CS */
+#define MCF_GPIO_PCLRR_CS_PCLRR_CS1                (0x02)
+#define MCF_GPIO_PCLRR_CS_PCLRR_CS2                (0x04)
+#define MCF_GPIO_PCLRR_CS_PCLRR_CS3                (0x08)
+#define MCF_GPIO_PCLRR_CS_PCLRR_CS4                (0x10)
+#define MCF_GPIO_PCLRR_CS_PCLRR_CS5                (0x20)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_PWM */
+#define MCF_GPIO_PCLRR_PWM_PCLRR_PWM2              (0x04)
+#define MCF_GPIO_PCLRR_PWM_PCLRR_PWM3              (0x08)
+#define MCF_GPIO_PCLRR_PWM_PCLRR_PWM4              (0x10)
+#define MCF_GPIO_PCLRR_PWM_PCLRR_PWM5              (0x20)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_FECI2C */
+#define MCF_GPIO_PCLRR_FECI2C_PCLRR_FECI2C0        (0x01)
+#define MCF_GPIO_PCLRR_FECI2C_PCLRR_FECI2C1        (0x02)
+#define MCF_GPIO_PCLRR_FECI2C_PCLRR_FECI2C2        (0x04)
+#define MCF_GPIO_PCLRR_FECI2C_PCLRR_FECI2C3        (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_UART */
+#define MCF_GPIO_PCLRR_UART_PCLRR_UART0            (0x01)
+#define MCF_GPIO_PCLRR_UART_PCLRR_UART1            (0x02)
+#define MCF_GPIO_PCLRR_UART_PCLRR_UART2            (0x04)
+#define MCF_GPIO_PCLRR_UART_PCLRR_UART3            (0x08)
+#define MCF_GPIO_PCLRR_UART_PCLRR_UART4            (0x10)
+#define MCF_GPIO_PCLRR_UART_PCLRR_UART5            (0x20)
+#define MCF_GPIO_PCLRR_UART_PCLRR_UART6            (0x40)
+#define MCF_GPIO_PCLRR_UART_PCLRR_UART7            (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_QSPI */
+#define MCF_GPIO_PCLRR_QSPI_PCLRR_QSPI0            (0x01)
+#define MCF_GPIO_PCLRR_QSPI_PCLRR_QSPI1            (0x02)
+#define MCF_GPIO_PCLRR_QSPI_PCLRR_QSPI2            (0x04)
+#define MCF_GPIO_PCLRR_QSPI_PCLRR_QSPI3            (0x08)
+#define MCF_GPIO_PCLRR_QSPI_PCLRR_QSPI4            (0x10)
+#define MCF_GPIO_PCLRR_QSPI_PCLRR_QSPI5            (0x20)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_TIMER */
+#define MCF_GPIO_PCLRR_TIMER_PCLRR_TIMER0          (0x01)
+#define MCF_GPIO_PCLRR_TIMER_PCLRR_TIMER1          (0x02)
+#define MCF_GPIO_PCLRR_TIMER_PCLRR_TIMER2          (0x04)
+#define MCF_GPIO_PCLRR_TIMER_PCLRR_TIMER3          (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_LCDDATAH */
+#define MCF_GPIO_PCLRR_LCDDATAH_PCLRR_LCDDATAH0    (0x01)
+#define MCF_GPIO_PCLRR_LCDDATAH_PCLRR_LCDDATAH1    (0x02)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_LCDDATAM */
+#define MCF_GPIO_PCLRR_LCDDATAM_PCLRR_LCDDATAM0    (0x01)
+#define MCF_GPIO_PCLRR_LCDDATAM_PCLRR_LCDDATAM1    (0x02)
+#define MCF_GPIO_PCLRR_LCDDATAM_PCLRR_LCDDATAM2    (0x04)
+#define MCF_GPIO_PCLRR_LCDDATAM_PCLRR_LCDDATAM3    (0x08)
+#define MCF_GPIO_PCLRR_LCDDATAM_PCLRR_LCDDATAM4    (0x10)
+#define MCF_GPIO_PCLRR_LCDDATAM_PCLRR_LCDDATAM5    (0x20)
+#define MCF_GPIO_PCLRR_LCDDATAM_PCLRR_LCDDATAM6    (0x40)
+#define MCF_GPIO_PCLRR_LCDDATAM_PCLRR_LCDDATAM7    (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_LCDDATAL */
+#define MCF_GPIO_PCLRR_LCDDATAL_PCLRR_LCDDATAL0    (0x01)
+#define MCF_GPIO_PCLRR_LCDDATAL_PCLRR_LCDDATAL1    (0x02)
+#define MCF_GPIO_PCLRR_LCDDATAL_PCLRR_LCDDATAL2    (0x04)
+#define MCF_GPIO_PCLRR_LCDDATAL_PCLRR_LCDDATAL3    (0x08)
+#define MCF_GPIO_PCLRR_LCDDATAL_PCLRR_LCDDATAL4    (0x10)
+#define MCF_GPIO_PCLRR_LCDDATAL_PCLRR_LCDDATAL5    (0x20)
+#define MCF_GPIO_PCLRR_LCDDATAL_PCLRR_LCDDATAL6    (0x40)
+#define MCF_GPIO_PCLRR_LCDDATAL_PCLRR_LCDDATAL7    (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_LCDCTLH */
+#define MCF_GPIO_PCLRR_LCDCTLH_PCLRR_LCDCTLH0      (0x01)
+
+/* Bit definitions and macros for MCF_GPIO_PCLRR_LCDCTLL */
+#define MCF_GPIO_PCLRR_LCDCTLL_PCLRR_LCDCTLL0      (0x01)
+#define MCF_GPIO_PCLRR_LCDCTLL_PCLRR_LCDCTLL1      (0x02)
+#define MCF_GPIO_PCLRR_LCDCTLL_PCLRR_LCDCTLL2      (0x04)
+#define MCF_GPIO_PCLRR_LCDCTLL_PCLRR_LCDCTLL3      (0x08)
+#define MCF_GPIO_PCLRR_LCDCTLL_PCLRR_LCDCTLL4      (0x10)
+#define MCF_GPIO_PCLRR_LCDCTLL_PCLRR_LCDCTLL5      (0x20)
+#define MCF_GPIO_PCLRR_LCDCTLL_PCLRR_LCDCTLL6      (0x40)
+#define MCF_GPIO_PCLRR_LCDCTLL_PCLRR_LCDCTLL7      (0x80)
+
+/* Bit definitions and macros for MCF_GPIO_PAR_FEC */
+#define MCF_GPIO_PAR_FEC_PAR_FEC_MII(x)            (((x)&0x03)<<0)
+#define MCF_GPIO_PAR_FEC_PAR_FEC_7W(x)             (((x)&0x03)<<2)
+#define MCF_GPIO_PAR_FEC_PAR_FEC_7W_GPIO           (0x00)
+#define MCF_GPIO_PAR_FEC_PAR_FEC_7W_URTS1          (0x04)
+#define MCF_GPIO_PAR_FEC_PAR_FEC_7W_FEC            (0x0C)
+#define MCF_GPIO_PAR_FEC_PAR_FEC_MII_GPIO          (0x00)
+#define MCF_GPIO_PAR_FEC_PAR_FEC_MII_UART          (0x01)
+#define MCF_GPIO_PAR_FEC_PAR_FEC_MII_FEC           (0x03)
+
+/* Bit definitions and macros for MCF_GPIO_PAR_PWM */
+#define MCF_GPIO_PAR_PWM_PAR_PWM1(x)               (((x)&0x03)<<0)
+#define MCF_GPIO_PAR_PWM_PAR_PWM3(x)               (((x)&0x03)<<2)
+#define MCF_GPIO_PAR_PWM_PAR_PWM5                  (0x10)
+#define MCF_GPIO_PAR_PWM_PAR_PWM7                  (0x20)
+
+/* Bit definitions and macros for MCF_GPIO_PAR_BUSCTL */
+#define MCF_GPIO_PAR_BUSCTL_PAR_TS(x)              (((x)&0x03)<<3)
+#define MCF_GPIO_PAR_BUSCTL_PAR_RWB                (0x20)
+#define MCF_GPIO_PAR_BUSCTL_PAR_TA                 (0x40)
+#define MCF_GPIO_PAR_BUSCTL_PAR_OE                 (0x80)
+#define MCF_GPIO_PAR_BUSCTL_PAR_OE_GPIO            (0x00)
+#define MCF_GPIO_PAR_BUSCTL_PAR_OE_OE              (0x80)
+#define MCF_GPIO_PAR_BUSCTL_PAR_TA_GPIO            (0x00)
+#define MCF_GPIO_PAR_BUSCTL_PAR_TA_TA              (0x40)
+#define MCF_GPIO_PAR_BUSCTL_PAR_RWB_GPIO           (0x00)
+#define MCF_GPIO_PAR_BUSCTL_PAR_RWB_RWB            (0x20)
+#define MCF_GPIO_PAR_BUSCTL_PAR_TS_GPIO            (0x00)
+#define MCF_GPIO_PAR_BUSCTL_PAR_TS_DACK0           (0x10)
+#define MCF_GPIO_PAR_BUSCTL_PAR_TS_TS              (0x18)
+
+/* Bit definitions and macros for MCF_GPIO_PAR_FECI2C */
+#define MCF_GPIO_PAR_FECI2C_PAR_SDA(x)             (((x)&0x03)<<0)
+#define MCF_GPIO_PAR_FECI2C_PAR_SCL(x)             (((x)&0x03)<<2)
+#define MCF_GPIO_PAR_FECI2C_PAR_MDIO(x)            (((x)&0x03)<<4)
+#define MCF_GPIO_PAR_FECI2C_PAR_MDC(x)             (((x)&0x03)<<6)
+#define MCF_GPIO_PAR_FECI2C_PAR_MDC_GPIO           (0x00)
+#define MCF_GPIO_PAR_FECI2C_PAR_MDC_UTXD2          (0x40)
+#define MCF_GPIO_PAR_FECI2C_PAR_MDC_SCL            (0x80)
+#define MCF_GPIO_PAR_FECI2C_PAR_MDC_EMDC           (0xC0)
+#define MCF_GPIO_PAR_FECI2C_PAR_MDIO_GPIO          (0x00)
+#define MCF_GPIO_PAR_FECI2C_PAR_MDIO_URXD2         (0x10)
+#define MCF_GPIO_PAR_FECI2C_PAR_MDIO_SDA           (0x20)
+#define MCF_GPIO_PAR_FECI2C_PAR_MDIO_EMDIO         (0x30)
+#define MCF_GPIO_PAR_FECI2C_PAR_SCL_GPIO           (0x00)
+#define MCF_GPIO_PAR_FECI2C_PAR_SCL_UTXD2          (0x04)
+#define MCF_GPIO_PAR_FECI2C_PAR_SCL_SCL            (0x0C)
+#define MCF_GPIO_PAR_FECI2C_PAR_SDA_GPIO           (0x00)
+#define MCF_GPIO_PAR_FECI2C_PAR_SDA_URXD2          (0x02)
+#define MCF_GPIO_PAR_FECI2C_PAR_SDA_SDA            (0x03)
+
+/* Bit definitions and macros for MCF_GPIO_PAR_BE */
+#define MCF_GPIO_PAR_BE_PAR_BE0                    (0x01)
+#define MCF_GPIO_PAR_BE_PAR_BE1                    (0x02)
+#define MCF_GPIO_PAR_BE_PAR_BE2                    (0x04)
+#define MCF_GPIO_PAR_BE_PAR_BE3                    (0x08)
+
+/* Bit definitions and macros for MCF_GPIO_PAR_CS */
+#define MCF_GPIO_PAR_CS_PAR_CS1                    (0x02)
+#define MCF_GPIO_PAR_CS_PAR_CS2                    (0x04)
+#define MCF_GPIO_PAR_CS_PAR_CS3                    (0x08)
+#define MCF_GPIO_PAR_CS_PAR_CS4                    (0x10)
+#define MCF_GPIO_PAR_CS_PAR_CS5                    (0x20)
+#define MCF_GPIO_PAR_CS_PAR_CS_CS1_GPIO            (0x00)
+#define MCF_GPIO_PAR_CS_PAR_CS_CS1_SDCS1           (0x01)
+#define MCF_GPIO_PAR_CS_PAR_CS_CS1_CS1             (0x03)
+
+/* Bit definitions and macros for MCF_GPIO_PAR_SSI */
+#define MCF_GPIO_PAR_SSI_PAR_MCLK                  (0x0080)
+#define MCF_GPIO_PAR_SSI_PAR_TXD(x)                (((x)&0x0003)<<8)
+#define MCF_GPIO_PAR_SSI_PAR_RXD(x)                (((x)&0x0003)<<10)
+#define MCF_GPIO_PAR_SSI_PAR_FS(x)                 (((x)&0x0003)<<12)
+#define MCF_GPIO_PAR_SSI_PAR_BCLK(x)               (((x)&0x0003)<<14)
+
+/* Bit definitions and macros for MCF_GPIO_PAR_UART */
+#define MCF_GPIO_PAR_UART_PAR_UTXD0                (0x0001)
+#define MCF_GPIO_PAR_UART_PAR_URXD0                (0x0002)
+#define MCF_GPIO_PAR_UART_PAR_URTS0                (0x0004)
+#define MCF_GPIO_PAR_UART_PAR_UCTS0                (0x0008)
+#define MCF_GPIO_PAR_UART_PAR_UTXD1(x)             (((x)&0x0003)<<4)
+#define MCF_GPIO_PAR_UART_PAR_URXD1(x)             (((x)&0x0003)<<6)
+#define MCF_GPIO_PAR_UART_PAR_URTS1(x)             (((x)&0x0003)<<8)
+#define MCF_GPIO_PAR_UART_PAR_UCTS1(x)             (((x)&0x0003)<<10)
+#define MCF_GPIO_PAR_UART_PAR_UCTS1_GPIO           (0x0000)
+#define MCF_GPIO_PAR_UART_PAR_UCTS1_SSI_BCLK       (0x0800)
+#define MCF_GPIO_PAR_UART_PAR_UCTS1_ULPI_D7        (0x0400)
+#define MCF_GPIO_PAR_UART_PAR_UCTS1_UCTS1          (0x0C00)
+#define MCF_GPIO_PAR_UART_PAR_URTS1_GPIO           (0x0000)
+#define MCF_GPIO_PAR_UART_PAR_URTS1_SSI_FS         (0x0200)
+#define MCF_GPIO_PAR_UART_PAR_URTS1_ULPI_D6        (0x0100)
+#define MCF_GPIO_PAR_UART_PAR_URTS1_URTS1          (0x0300)
+#define MCF_GPIO_PAR_UART_PAR_URXD1_GPIO           (0x0000)
+#define MCF_GPIO_PAR_UART_PAR_URXD1_SSI_RXD        (0x0080)
+#define MCF_GPIO_PAR_UART_PAR_URXD1_ULPI_D5        (0x0040)
+#define MCF_GPIO_PAR_UART_PAR_URXD1_URXD1          (0x00C0)
+#define MCF_GPIO_PAR_UART_PAR_UTXD1_GPIO           (0x0000)
+#define MCF_GPIO_PAR_UART_PAR_UTXD1_SSI_TXD        (0x0020)
+#define MCF_GPIO_PAR_UART_PAR_UTXD1_ULPI_D4        (0x0010)
+#define MCF_GPIO_PAR_UART_PAR_UTXD1_UTXD1          (0x0030)
+
+/* Bit definitions and macros for MCF_GPIO_PAR_QSPI */
+#define MCF_GPIO_PAR_QSPI_PAR_SCK(x)               (((x)&0x0003)<<4)
+#define MCF_GPIO_PAR_QSPI_PAR_DOUT(x)              (((x)&0x0003)<<6)
+#define MCF_GPIO_PAR_QSPI_PAR_DIN(x)               (((x)&0x0003)<<8)
+#define MCF_GPIO_PAR_QSPI_PAR_PCS0(x)              (((x)&0x0003)<<10)
+#define MCF_GPIO_PAR_QSPI_PAR_PCS1(x)              (((x)&0x0003)<<12)
+#define MCF_GPIO_PAR_QSPI_PAR_PCS2(x)              (((x)&0x0003)<<14)
+
+/* Bit definitions and macros for MCF_GPIO_PAR_TIMER */
+#define MCF_GPIO_PAR_TIMER_PAR_TIN0(x)             (((x)&0x03)<<0)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN1(x)             (((x)&0x03)<<2)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN2(x)             (((x)&0x03)<<4)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN3(x)             (((x)&0x03)<<6)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN3_GPIO           (0x00)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN3_TOUT3          (0x80)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN3_URXD2          (0x40)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN3_TIN3           (0xC0)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN2_GPIO           (0x00)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN2_TOUT2          (0x20)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN2_UTXD2          (0x10)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN2_TIN2           (0x30)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN1_GPIO           (0x00)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN1_TOUT1          (0x08)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN1_DACK1          (0x04)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN1_TIN1           (0x0C)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN0_GPIO           (0x00)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN0_TOUT0          (0x02)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN0_DREQ0          (0x01)
+#define MCF_GPIO_PAR_TIMER_PAR_TIN0_TIN0           (0x03)
+
+/* Bit definitions and macros for MCF_GPIO_PAR_LCDDATA */
+#define MCF_GPIO_PAR_LCDDATA_PAR_LD7_0(x)          (((x)&0x03)<<0)
+#define MCF_GPIO_PAR_LCDDATA_PAR_LD15_8(x)         (((x)&0x03)<<2)
+#define MCF_GPIO_PAR_LCDDATA_PAR_LD16(x)           (((x)&0x03)<<4)
+#define MCF_GPIO_PAR_LCDDATA_PAR_LD17(x)           (((x)&0x03)<<6)
+
+/* Bit definitions and macros for MCF_GPIO_PAR_LCDCTL */
+#define MCF_GPIO_PAR_LCDCTL_PAR_CLS                (0x0001)
+#define MCF_GPIO_PAR_LCDCTL_PAR_PS                 (0x0002)
+#define MCF_GPIO_PAR_LCDCTL_PAR_REV                (0x0004)
+#define MCF_GPIO_PAR_LCDCTL_PAR_SPL_SPR            (0x0008)
+#define MCF_GPIO_PAR_LCDCTL_PAR_CONTRAST           (0x0010)
+#define MCF_GPIO_PAR_LCDCTL_PAR_LSCLK              (0x0020)
+#define MCF_GPIO_PAR_LCDCTL_PAR_LP_HSYNC           (0x0040)
+#define MCF_GPIO_PAR_LCDCTL_PAR_FLM_VSYNC          (0x0080)
+#define MCF_GPIO_PAR_LCDCTL_PAR_ACD_OE             (0x0100)
+
+/* Bit definitions and macros for MCF_GPIO_PAR_IRQ */
+#define MCF_GPIO_PAR_IRQ_PAR_IRQ1(x)               (((x)&0x0003)<<4)
+#define MCF_GPIO_PAR_IRQ_PAR_IRQ2(x)               (((x)&0x0003)<<6)
+#define MCF_GPIO_PAR_IRQ_PAR_IRQ4(x)               (((x)&0x0003)<<8)
+#define MCF_GPIO_PAR_IRQ_PAR_IRQ5(x)               (((x)&0x0003)<<10)
+#define MCF_GPIO_PAR_IRQ_PAR_IRQ6(x)               (((x)&0x0003)<<12)
+
+/* Bit definitions and macros for MCF_GPIO_MSCR_FLEXBUS */
+#define MCF_GPIO_MSCR_FLEXBUS_MSCR_ADDRCTL(x)      (((x)&0x03)<<0)
+#define MCF_GPIO_MSCR_FLEXBUS_MSCR_DLOWER(x)       (((x)&0x03)<<2)
+#define MCF_GPIO_MSCR_FLEXBUS_MSCR_DUPPER(x)       (((x)&0x03)<<4)
+
+/* Bit definitions and macros for MCF_GPIO_MSCR_SDRAM */
+#define MCF_GPIO_MSCR_SDRAM_MSCR_SDRAM(x)          (((x)&0x03)<<0)
+#define MCF_GPIO_MSCR_SDRAM_MSCR_SDCLK(x)          (((x)&0x03)<<2)
+#define MCF_GPIO_MSCR_SDRAM_MSCR_SDCLKB(x)         (((x)&0x03)<<4)
+
+/* Bit definitions and macros for MCF_GPIO_DSCR_I2C */
+#define MCF_GPIO_DSCR_I2C_I2C_DSE(x)               (((x)&0x03)<<0)
+
+/* Bit definitions and macros for MCF_GPIO_DSCR_PWM */
+#define MCF_GPIO_DSCR_PWM_PWM_DSE(x)               (((x)&0x03)<<0)
+
+/* Bit definitions and macros for MCF_GPIO_DSCR_FEC */
+#define MCF_GPIO_DSCR_FEC_FEC_DSE(x)               (((x)&0x03)<<0)
+
+/* Bit definitions and macros for MCF_GPIO_DSCR_UART */
+#define MCF_GPIO_DSCR_UART_UART0_DSE(x)            (((x)&0x03)<<0)
+#define MCF_GPIO_DSCR_UART_UART1_DSE(x)            (((x)&0x03)<<2)
+
+/* Bit definitions and macros for MCF_GPIO_DSCR_QSPI */
+#define MCF_GPIO_DSCR_QSPI_QSPI_DSE(x)             (((x)&0x03)<<0)
+
+/* Bit definitions and macros for MCF_GPIO_DSCR_TIMER */
+#define MCF_GPIO_DSCR_TIMER_TIMER_DSE(x)           (((x)&0x03)<<0)
+
+/* Bit definitions and macros for MCF_GPIO_DSCR_SSI */
+#define MCF_GPIO_DSCR_SSI_SSI_DSE(x)               (((x)&0x03)<<0)
+
+/* Bit definitions and macros for MCF_GPIO_DSCR_LCD */
+#define MCF_GPIO_DSCR_LCD_LCD_DSE(x)               (((x)&0x03)<<0)
+
+/* Bit definitions and macros for MCF_GPIO_DSCR_DEBUG */
+#define MCF_GPIO_DSCR_DEBUG_DEBUG_DSE(x)           (((x)&0x03)<<0)
+
+/* Bit definitions and macros for MCF_GPIO_DSCR_CLKRST */
+#define MCF_GPIO_DSCR_CLKRST_CLKRST_DSE(x)         (((x)&0x03)<<0)
+
+/* Bit definitions and macros for MCF_GPIO_DSCR_IRQ */
+#define MCF_GPIO_DSCR_IRQ_IRQ_DSE(x)               (((x)&0x03)<<0)
+
+/*
+ * Generic GPIO support
+ */
+#define MCFGPIO_PODR                   MCFGPIO_PODR_FECH
+#define MCFGPIO_PDDR                   MCFGPIO_PDDR_FECH
+#define MCFGPIO_PPDR                   MCFGPIO_PPDSDR_FECH
+#define MCFGPIO_SETR                   MCFGPIO_PPDSDR_FECH
+#define MCFGPIO_CLRR                   MCFGPIO_PCLRR_FECH
+
+#define MCFGPIO_PIN_MAX                        136
+#define MCFGPIO_IRQ_MAX                        8
+#define MCFGPIO_IRQ_VECBASE            MCFINT_VECBASE
+
+/*********************************************************************
+ *
+ * Phase Locked Loop (PLL)
+ *
+ *********************************************************************/
+
+/* Register read/write macros */
+#define MCF_PLL_PODR              0xFC0C0000
+#define MCF_PLL_PLLCR             0xFC0C0004
+#define MCF_PLL_PMDR              0xFC0C0008
+#define MCF_PLL_PFDR              0xFC0C000C
+
+/* Bit definitions and macros for MCF_PLL_PODR */
+#define MCF_PLL_PODR_BUSDIV(x)    (((x)&0x0F)<<0)
+#define MCF_PLL_PODR_CPUDIV(x)    (((x)&0x0F)<<4)
+
+/* Bit definitions and macros for MCF_PLL_PLLCR */
+#define MCF_PLL_PLLCR_DITHDEV(x)  (((x)&0x07)<<0)
+#define MCF_PLL_PLLCR_DITHEN      (0x80)
+
+/* Bit definitions and macros for MCF_PLL_PMDR */
+#define MCF_PLL_PMDR_MODDIV(x)    (((x)&0xFF)<<0)
+
+/* Bit definitions and macros for MCF_PLL_PFDR */
+#define MCF_PLL_PFDR_MFD(x)       (((x)&0xFF)<<0)
+
+/*********************************************************************
+ *
+ * System Control Module Registers (SCM)
+ *
+ *********************************************************************/
+
+/* Register read/write macros */
+#define MCF_SCM_MPR                    0xFC000000
+#define MCF_SCM_PACRA                  0xFC000020
+#define MCF_SCM_PACRB                  0xFC000024
+#define MCF_SCM_PACRC                  0xFC000028
+#define MCF_SCM_PACRD                  0xFC00002C
+#define MCF_SCM_PACRE                  0xFC000040
+#define MCF_SCM_PACRF                  0xFC000044
+
+#define MCF_SCM_BCR                    0xFC040024
+
+/*********************************************************************
+ *
+ * SDRAM Controller (SDRAMC)
+ *
+ *********************************************************************/
+
+/* Register read/write macros */
+#define MCF_SDRAMC_SDMR                        0xFC0B8000
+#define MCF_SDRAMC_SDCR                        0xFC0B8004
+#define MCF_SDRAMC_SDCFG1              0xFC0B8008
+#define MCF_SDRAMC_SDCFG2              0xFC0B800C
+#define MCF_SDRAMC_LIMP_FIX            0xFC0B8080
+#define MCF_SDRAMC_SDDS                        0xFC0B8100
+#define MCF_SDRAMC_SDCS0               0xFC0B8110
+#define MCF_SDRAMC_SDCS1               0xFC0B8114
+#define MCF_SDRAMC_SDCS2               0xFC0B8118
+#define MCF_SDRAMC_SDCS3               0xFC0B811C
+
+/* Bit definitions and macros for MCF_SDRAMC_SDMR */
+#define MCF_SDRAMC_SDMR_CMD            (0x00010000)
+#define MCF_SDRAMC_SDMR_AD(x)          (((x)&0x00000FFF)<<18)
+#define MCF_SDRAMC_SDMR_BNKAD(x)       (((x)&0x00000003)<<30)
+#define MCF_SDRAMC_SDMR_BNKAD_LMR      (0x00000000)
+#define MCF_SDRAMC_SDMR_BNKAD_LEMR     (0x40000000)
+
+/* Bit definitions and macros for MCF_SDRAMC_SDCR */
+#define MCF_SDRAMC_SDCR_IPALL          (0x00000002)
+#define MCF_SDRAMC_SDCR_IREF           (0x00000004)
+#define MCF_SDRAMC_SDCR_DQS_OE(x)      (((x)&0x0000000F)<<8)
+#define MCF_SDRAMC_SDCR_PS(x)          (((x)&0x00000003)<<12)
+#define MCF_SDRAMC_SDCR_RCNT(x)                (((x)&0x0000003F)<<16)
+#define MCF_SDRAMC_SDCR_OE_RULE                (0x00400000)
+#define MCF_SDRAMC_SDCR_MUX(x)         (((x)&0x00000003)<<24)
+#define MCF_SDRAMC_SDCR_REF            (0x10000000)
+#define MCF_SDRAMC_SDCR_DDR            (0x20000000)
+#define MCF_SDRAMC_SDCR_CKE            (0x40000000)
+#define MCF_SDRAMC_SDCR_MODE_EN                (0x80000000)
+#define MCF_SDRAMC_SDCR_PS_16          (0x00002000)
+#define MCF_SDRAMC_SDCR_PS_32          (0x00000000)
+
+/* Bit definitions and macros for MCF_SDRAMC_SDCFG1 */
+#define MCF_SDRAMC_SDCFG1_WTLAT(x)     (((x)&0x00000007)<<4)
+#define MCF_SDRAMC_SDCFG1_REF2ACT(x)   (((x)&0x0000000F)<<8)
+#define MCF_SDRAMC_SDCFG1_PRE2ACT(x)   (((x)&0x00000007)<<12)
+#define MCF_SDRAMC_SDCFG1_ACT2RW(x)    (((x)&0x00000007)<<16)
+#define MCF_SDRAMC_SDCFG1_RDLAT(x)     (((x)&0x0000000F)<<20)
+#define MCF_SDRAMC_SDCFG1_SWT2RD(x)    (((x)&0x00000007)<<24)
+#define MCF_SDRAMC_SDCFG1_SRD2RW(x)    (((x)&0x0000000F)<<28)
+
+/* Bit definitions and macros for MCF_SDRAMC_SDCFG2 */
+#define MCF_SDRAMC_SDCFG2_BL(x)                (((x)&0x0000000F)<<16)
+#define MCF_SDRAMC_SDCFG2_BRD2WT(x)    (((x)&0x0000000F)<<20)
+#define MCF_SDRAMC_SDCFG2_BWT2RW(x)    (((x)&0x0000000F)<<24)
+#define MCF_SDRAMC_SDCFG2_BRD2PRE(x)   (((x)&0x0000000F)<<28)
+
+/* Device Errata - LIMP mode work around */
+#define MCF_SDRAMC_REFRESH             (0x40000000)
+
+/* Bit definitions and macros for MCF_SDRAMC_SDDS */
+#define MCF_SDRAMC_SDDS_SB_D(x)                (((x)&0x00000003)<<0)
+#define MCF_SDRAMC_SDDS_SB_S(x)                (((x)&0x00000003)<<2)
+#define MCF_SDRAMC_SDDS_SB_A(x)                (((x)&0x00000003)<<4)
+#define MCF_SDRAMC_SDDS_SB_C(x)                (((x)&0x00000003)<<6)
+#define MCF_SDRAMC_SDDS_SB_E(x)                (((x)&0x00000003)<<8)
+
+/* Bit definitions and macros for MCF_SDRAMC_SDCS */
+#define MCF_SDRAMC_SDCS_CSSZ(x)                (((x)&0x0000001F)<<0)
+#define MCF_SDRAMC_SDCS_BASE(x)                (((x)&0x00000FFF)<<20)
+#define MCF_SDRAMC_SDCS_BA(x)          ((x)&0xFFF00000)
+#define MCF_SDRAMC_SDCS_CSSZ_DIABLE    (0x00000000)
+#define MCF_SDRAMC_SDCS_CSSZ_1MBYTE    (0x00000013)
+#define MCF_SDRAMC_SDCS_CSSZ_2MBYTE    (0x00000014)
+#define MCF_SDRAMC_SDCS_CSSZ_4MBYTE    (0x00000015)
+#define MCF_SDRAMC_SDCS_CSSZ_8MBYTE    (0x00000016)
+#define MCF_SDRAMC_SDCS_CSSZ_16MBYTE   (0x00000017)
+#define MCF_SDRAMC_SDCS_CSSZ_32MBYTE   (0x00000018)
+#define MCF_SDRAMC_SDCS_CSSZ_64MBYTE   (0x00000019)
+#define MCF_SDRAMC_SDCS_CSSZ_128MBYTE  (0x0000001A)
+#define MCF_SDRAMC_SDCS_CSSZ_256MBYTE  (0x0000001B)
+#define MCF_SDRAMC_SDCS_CSSZ_512MBYTE  (0x0000001C)
+#define MCF_SDRAMC_SDCS_CSSZ_1GBYTE    (0x0000001D)
+#define MCF_SDRAMC_SDCS_CSSZ_2GBYTE    (0x0000001E)
+#define MCF_SDRAMC_SDCS_CSSZ_4GBYTE    (0x0000001F)
+
+/*
+ * Edge Port Module (EPORT)
+ */
+#define MCFEPORT_EPPAR                (0xFC094000)
+#define MCFEPORT_EPDDR                (0xFC094002)
+#define MCFEPORT_EPIER                (0xFC094003)
+#define MCFEPORT_EPDR                 (0xFC094004)
+#define MCFEPORT_EPPDR                (0xFC094005)
+#define MCFEPORT_EPFR                 (0xFC094006)
+
+/********************************************************************/
+#endif /* m53xxsim_h */
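The PAR (pin assignment) macros above are what the platform code combines, usually via read-modify-write accesses, to route pads between GPIO and peripheral functions. A minimal sketch, modelled on m532x_fec_init() and gpio_init() in the platform code shown further down in this commit (and carried over to the new m53xx.c); the register and bit names come from this header, while the wrapper function itself is purely illustrative and assumes the usual <linux/io.h> and <asm/mcfsim.h> includes:

        static void __init example_pin_routing(void)
        {
                u8 v;

                /* Route the MDC/MDIO pads to the Ethernet MAC (as in m532x_fec_init) */
                v = readb(MCFGPIO_PAR_FECI2C);
                v |= MCF_GPIO_PAR_FECI2C_PAR_MDC_EMDC |
                     MCF_GPIO_PAR_FECI2C_PAR_MDIO_EMDIO;
                writeb(v, MCFGPIO_PAR_FECI2C);

                /* Hand the UART0 pads to the UART (as in gpio_init) */
                writew(MCF_GPIO_PAR_UART_PAR_URXD0 | MCF_GPIO_PAR_UART_PAR_UTXD0,
                        MCFGPIO_PAR_UART);
        }
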
index 192bbfeabf70c1047288286b37a7ee686d0d32f0..6d13cae44af530240b99b83e4248288ee939ae69 100644 (file)
  */
 #define ACR0_MODE      (ACR_BA(CONFIG_MBAR)+ACR_ADMSK(0x1000000)+ \
                         ACR_ENABLE+ACR_SUPER+ACR_CM_OFF_PRE+ACR_SP)
+#if defined(CONFIG_CACHE_COPYBACK)
 #define ACR1_MODE      (ACR_BA(CONFIG_RAMBASE)+ACR_ADMSK(CONFIG_RAMSIZE)+ \
-                        ACR_ENABLE+ACR_SUPER+ACR_SP)
+                        ACR_ENABLE+ACR_SUPER+ACR_SP+ACR_CM_CP)
+#else
+#define ACR1_MODE      (ACR_BA(CONFIG_RAMBASE)+ACR_ADMSK(CONFIG_RAMSIZE)+ \
+                        ACR_ENABLE+ACR_SUPER+ACR_SP+ACR_CM_WT)
+#endif
 #define ACR2_MODE      0
 #define ACR3_MODE      (ACR_BA(CONFIG_RAMBASE)+ACR_ADMSK(CONFIG_RAMSIZE)+ \
                         ACR_ENABLE+ACR_SUPER+ACR_SP)
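These ACRn_MODE words are eventually loaded into the ColdFire access control registers during early cache setup, so with CONFIG_CACHE_COPYBACK the kernel RAM window is cached copy-back, otherwise write-through. A hedged sketch of such a load; the inline-asm form and the point at which it runs are assumptions for illustration, not part of this hunk:

        /* Illustrative only: program the data-RAM access control register.
         * ACR1_MODE already carries ACR_CM_CP or ACR_CM_WT per the #if above.
         */
        static inline void example_load_acr1(void)
        {
                __asm__ __volatile__ ("movec %0, %%acr1" : : "r" (ACR1_MODE));
        }
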
index fa1059f50dfcd1bef9b98c0b482839325c642ddb..c41ebf45f1d0f576a384d1a789fc2940b3e3a460 100644 (file)
@@ -104,7 +104,7 @@ static inline void gpio_free(unsigned gpio)
 #if defined(CONFIG_M5206) || defined(CONFIG_M5206e) || \
     defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
     defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-    defined(CONFIG_M532x) || defined(CONFIG_M54xx) || \
+    defined(CONFIG_M53xx) || defined(CONFIG_M54xx) || \
     defined(CONFIG_M5441x)
 
 /* These parts have GPIO organized by 8 bit ports */
@@ -139,7 +139,7 @@ static inline void gpio_free(unsigned gpio)
 
 #if defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
     defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-    defined(CONFIG_M532x) || defined(CONFIG_M5441x)
+    defined(CONFIG_M53xx) || defined(CONFIG_M5441x)
 /*
  * These parts have an 'Edge' Port module (external interrupt/GPIO) which uses
  * read-modify-write to change an output and a GPIO module which has separate
@@ -195,7 +195,7 @@ static inline u32 __mcfgpio_ppdr(unsigned gpio)
                return MCFSIM2_GPIO1READ;
 #elif defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
       defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-      defined(CONFIG_M532x) || defined(CONFIG_M5441x)
+      defined(CONFIG_M53xx) || defined(CONFIG_M5441x)
 #if !defined(CONFIG_M5441x)
        if (gpio < 8)
                return MCFEPORT_EPPDR;
@@ -237,7 +237,7 @@ static inline u32 __mcfgpio_podr(unsigned gpio)
                return MCFSIM2_GPIO1WRITE;
 #elif defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
       defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-      defined(CONFIG_M532x) || defined(CONFIG_M5441x)
+      defined(CONFIG_M53xx) || defined(CONFIG_M5441x)
 #if !defined(CONFIG_M5441x)
        if (gpio < 8)
                return MCFEPORT_EPDR;
@@ -279,7 +279,7 @@ static inline u32 __mcfgpio_pddr(unsigned gpio)
                return MCFSIM2_GPIO1ENABLE;
 #elif defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
       defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-      defined(CONFIG_M532x) || defined(CONFIG_M5441x)
+      defined(CONFIG_M53xx) || defined(CONFIG_M5441x)
 #if !defined(CONFIG_M5441x)
        if (gpio < 8)
                return MCFEPORT_EPDDR;
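These helpers only select the right port register for a given pin number; the pin itself is then driven with plain MMIO writes to the pad-routing, direction and clear registers. A minimal sketch, lifted from gpio_init() in the platform code below (register names are from this commit; the wrapper and comments are illustrative):

        static void __init example_tin3_output_low(void)
        {
                writeb(0x00, MCFGPIO_PAR_TIMER);        /* TIN3 pad as GPIO, not timer */
                writeb(0x08, MCFGPIO_PDDR_TIMER);       /* bit 3: configure as output */
                writeb(0x00, MCFGPIO_PCLRR_TIMER);      /* clear the output latch */
        }
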
index a04fd9b2714c7d39b1365337b63ee55942a1f9f3..bc867de8a1e9a232f879c7c12eaf042a04e7c43e 100644 (file)
@@ -36,8 +36,8 @@
 #elif defined(CONFIG_M5307)
 #include <asm/m5307sim.h>
 #include <asm/mcfintc.h>
-#elif defined(CONFIG_M532x)
-#include <asm/m532xsim.h>
+#elif defined(CONFIG_M53xx)
+#include <asm/m53xxsim.h>
 #elif defined(CONFIG_M5407)
 #include <asm/m5407sim.h>
 #include <asm/mcfintc.h>
index da2fa43c2e458438ec61c6246faf5379ae9f356c..089f0f150bbfc06169a720e2ddb9b9d3d563658c 100644 (file)
@@ -19,7 +19,7 @@
 #define        MCFTIMER_TRR            0x04            /* Timer Reference (r/w) */
 #define        MCFTIMER_TCR            0x08            /* Timer Capture reg (r/w) */
 #define        MCFTIMER_TCN            0x0C            /* Timer Counter reg (r/w) */
-#if defined(CONFIG_M532x) || defined(CONFIG_M5441x)
+#if defined(CONFIG_M53xx) || defined(CONFIG_M5441x)
 #define        MCFTIMER_TER            0x03            /* Timer Event reg (r/w) */
 #else
 #define        MCFTIMER_TER            0x11            /* Timer Event reg (r/w) */
index 02591a109f8c0965ffea5116fbfec998078a6109..68f0fac60099277eac637851f3415e092c8fb921 100644 (file)
@@ -25,7 +25,7 @@ obj-$(CONFIG_M527x)   += m527x.o pit.o intc-2.o reset.o
 obj-$(CONFIG_M5272)    += m5272.o intc-5272.o timers.o
 obj-$(CONFIG_M528x)    += m528x.o pit.o intc-2.o reset.o
 obj-$(CONFIG_M5307)    += m5307.o timers.o intc.o reset.o
-obj-$(CONFIG_M532x)    += m532x.o timers.o intc-simr.o reset.o
+obj-$(CONFIG_M53xx)    += m53xx.o timers.o intc-simr.o reset.o
 obj-$(CONFIG_M5407)    += m5407.o timers.o intc.o reset.o
 obj-$(CONFIG_M54xx)    += m54xx.o sltimers.o intc-2.o
 obj-$(CONFIG_M5441x)   += m5441x.o pit.o intc-simr.o reset.o
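The header and Makefile hunks above switch the build guards from the part-specific CONFIG_M532x to the family-wide CONFIG_M53xx and rename the SIM header accordingly. A minimal sketch of the resulting include pattern, mirroring the mcfsim.h hunk above (illustrative only):

        #if defined(CONFIG_M53xx)               /* previously CONFIG_M532x */
        #include <asm/m53xxsim.h>               /* previously <asm/m532xsim.h> */
        #endif
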
diff --git a/arch/m68k/platform/coldfire/m532x.c b/arch/m68k/platform/coldfire/m532x.c
deleted file mode 100644 (file)
index 7951d1d..0000000
+++ /dev/null
@@ -1,593 +0,0 @@
-/***************************************************************************/
-
-/*
- *     linux/arch/m68knommu/platform/532x/config.c
- *
- *     Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
- *     Copyright (C) 2000, Lineo (www.lineo.com)
- *     Yaroslav Vinogradov yaroslav.vinogradov@freescale.com
- *     Copyright Freescale Semiconductor, Inc 2006
- *     Copyright (c) 2006, emlix, Sebastian Hess <shess@hessware.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-/***************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/param.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <asm/machdep.h>
-#include <asm/coldfire.h>
-#include <asm/mcfsim.h>
-#include <asm/mcfuart.h>
-#include <asm/mcfdma.h>
-#include <asm/mcfwdebug.h>
-#include <asm/mcfclk.h>
-
-/***************************************************************************/
-
-DEFINE_CLK(0, "flexbus", 2, MCF_CLK);
-DEFINE_CLK(0, "mcfcan.0", 8, MCF_CLK);
-DEFINE_CLK(0, "fec.0", 12, MCF_CLK);
-DEFINE_CLK(0, "edma", 17, MCF_CLK);
-DEFINE_CLK(0, "intc.0", 18, MCF_CLK);
-DEFINE_CLK(0, "intc.1", 19, MCF_CLK);
-DEFINE_CLK(0, "iack.0", 21, MCF_CLK);
-DEFINE_CLK(0, "mcfi2c.0", 22, MCF_CLK);
-DEFINE_CLK(0, "mcfqspi.0", 23, MCF_CLK);
-DEFINE_CLK(0, "mcfuart.0", 24, MCF_BUSCLK);
-DEFINE_CLK(0, "mcfuart.1", 25, MCF_BUSCLK);
-DEFINE_CLK(0, "mcfuart.2", 26, MCF_BUSCLK);
-DEFINE_CLK(0, "mcftmr.0", 28, MCF_CLK);
-DEFINE_CLK(0, "mcftmr.1", 29, MCF_CLK);
-DEFINE_CLK(0, "mcftmr.2", 30, MCF_CLK);
-DEFINE_CLK(0, "mcftmr.3", 31, MCF_CLK);
-
-DEFINE_CLK(0, "mcfpit.0", 32, MCF_CLK);
-DEFINE_CLK(0, "mcfpit.1", 33, MCF_CLK);
-DEFINE_CLK(0, "mcfpit.2", 34, MCF_CLK);
-DEFINE_CLK(0, "mcfpit.3", 35, MCF_CLK);
-DEFINE_CLK(0, "mcfpwm.0", 36, MCF_CLK);
-DEFINE_CLK(0, "mcfeport.0", 37, MCF_CLK);
-DEFINE_CLK(0, "mcfwdt.0", 38, MCF_CLK);
-DEFINE_CLK(0, "sys.0", 40, MCF_BUSCLK);
-DEFINE_CLK(0, "gpio.0", 41, MCF_BUSCLK);
-DEFINE_CLK(0, "mcfrtc.0", 42, MCF_CLK);
-DEFINE_CLK(0, "mcflcd.0", 43, MCF_CLK);
-DEFINE_CLK(0, "mcfusb-otg.0", 44, MCF_CLK);
-DEFINE_CLK(0, "mcfusb-host.0", 45, MCF_CLK);
-DEFINE_CLK(0, "sdram.0", 46, MCF_CLK);
-DEFINE_CLK(0, "ssi.0", 47, MCF_CLK);
-DEFINE_CLK(0, "pll.0", 48, MCF_CLK);
-
-DEFINE_CLK(1, "mdha.0", 32, MCF_CLK);
-DEFINE_CLK(1, "skha.0", 33, MCF_CLK);
-DEFINE_CLK(1, "rng.0", 34, MCF_CLK);
-
-struct clk *mcf_clks[] = {
-       &__clk_0_2,     /* flexbus */
-       &__clk_0_8,     /* mcfcan.0 */
-       &__clk_0_12,    /* fec.0 */
-       &__clk_0_17,    /* edma */
-       &__clk_0_18,    /* intc.0 */
-       &__clk_0_19,    /* intc.1 */
-       &__clk_0_21,    /* iack.0 */
-       &__clk_0_22,    /* mcfi2c.0 */
-       &__clk_0_23,    /* mcfqspi.0 */
-       &__clk_0_24,    /* mcfuart.0 */
-       &__clk_0_25,    /* mcfuart.1 */
-       &__clk_0_26,    /* mcfuart.2 */
-       &__clk_0_28,    /* mcftmr.0 */
-       &__clk_0_29,    /* mcftmr.1 */
-       &__clk_0_30,    /* mcftmr.2 */
-       &__clk_0_31,    /* mcftmr.3 */
-
-       &__clk_0_32,    /* mcfpit.0 */
-       &__clk_0_33,    /* mcfpit.1 */
-       &__clk_0_34,    /* mcfpit.2 */
-       &__clk_0_35,    /* mcfpit.3 */
-       &__clk_0_36,    /* mcfpwm.0 */
-       &__clk_0_37,    /* mcfeport.0 */
-       &__clk_0_38,    /* mcfwdt.0 */
-       &__clk_0_40,    /* sys.0 */
-       &__clk_0_41,    /* gpio.0 */
-       &__clk_0_42,    /* mcfrtc.0 */
-       &__clk_0_43,    /* mcflcd.0 */
-       &__clk_0_44,    /* mcfusb-otg.0 */
-       &__clk_0_45,    /* mcfusb-host.0 */
-       &__clk_0_46,    /* sdram.0 */
-       &__clk_0_47,    /* ssi.0 */
-       &__clk_0_48,    /* pll.0 */
-
-       &__clk_1_32,    /* mdha.0 */
-       &__clk_1_33,    /* skha.0 */
-       &__clk_1_34,    /* rng.0 */
-       NULL,
-};
-
-static struct clk * const enable_clks[] __initconst = {
-       &__clk_0_2,     /* flexbus */
-       &__clk_0_18,    /* intc.0 */
-       &__clk_0_19,    /* intc.1 */
-       &__clk_0_21,    /* iack.0 */
-       &__clk_0_24,    /* mcfuart.0 */
-       &__clk_0_25,    /* mcfuart.1 */
-       &__clk_0_26,    /* mcfuart.2 */
-
-       &__clk_0_32,    /* mcfpit.0 */
-       &__clk_0_33,    /* mcfpit.1 */
-       &__clk_0_37,    /* mcfeport.0 */
-       &__clk_0_40,    /* sys.0 */
-       &__clk_0_41,    /* gpio.0 */
-       &__clk_0_46,    /* sdram.0 */
-       &__clk_0_48,    /* pll.0 */
-};
-
-static struct clk * const disable_clks[] __initconst = {
-       &__clk_0_8,     /* mcfcan.0 */
-       &__clk_0_12,    /* fec.0 */
-       &__clk_0_17,    /* edma */
-       &__clk_0_22,    /* mcfi2c.0 */
-       &__clk_0_23,    /* mcfqspi.0 */
-       &__clk_0_28,    /* mcftmr.0 */
-       &__clk_0_29,    /* mcftmr.1 */
-       &__clk_0_30,    /* mcftmr.2 */
-       &__clk_0_31,    /* mcftmr.3 */
-       &__clk_0_34,    /* mcfpit.2 */
-       &__clk_0_35,    /* mcfpit.3 */
-       &__clk_0_36,    /* mcfpwm.0 */
-       &__clk_0_38,    /* mcfwdt.0 */
-       &__clk_0_42,    /* mcfrtc.0 */
-       &__clk_0_43,    /* mcflcd.0 */
-       &__clk_0_44,    /* mcfusb-otg.0 */
-       &__clk_0_45,    /* mcfusb-host.0 */
-       &__clk_0_47,    /* ssi.0 */
-       &__clk_1_32,    /* mdha.0 */
-       &__clk_1_33,    /* skha.0 */
-       &__clk_1_34,    /* rng.0 */
-};
-
-
-static void __init m532x_clk_init(void)
-{
-       unsigned i;
-
-       /* make sure these clocks are enabled */
-       for (i = 0; i < ARRAY_SIZE(enable_clks); ++i)
-               __clk_init_enabled(enable_clks[i]);
-       /* make sure these clocks are disabled */
-       for (i = 0; i < ARRAY_SIZE(disable_clks); ++i)
-               __clk_init_disabled(disable_clks[i]);
-}
-
-/***************************************************************************/
-
-#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
-
-static void __init m532x_qspi_init(void)
-{
-       /* setup QSPS pins for QSPI with gpio CS control */
-       writew(0x01f0, MCFGPIO_PAR_QSPI);
-}
-
-#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */
-
-/***************************************************************************/
-
-static void __init m532x_uarts_init(void)
-{
-       /* UART GPIO initialization */
-       writew(readw(MCFGPIO_PAR_UART) | 0x0FFF, MCFGPIO_PAR_UART);
-}
-
-/***************************************************************************/
-
-static void __init m532x_fec_init(void)
-{
-       u8 v;
-
-       /* Set multi-function pins to ethernet mode for fec0 */
-       v = readb(MCFGPIO_PAR_FECI2C);
-       v |= MCF_GPIO_PAR_FECI2C_PAR_MDC_EMDC |
-               MCF_GPIO_PAR_FECI2C_PAR_MDIO_EMDIO;
-       writeb(v, MCFGPIO_PAR_FECI2C);
-
-       v = readb(MCFGPIO_PAR_FEC);
-       v = MCF_GPIO_PAR_FEC_PAR_FEC_7W_FEC | MCF_GPIO_PAR_FEC_PAR_FEC_MII_FEC;
-       writeb(v, MCFGPIO_PAR_FEC);
-}
-
-/***************************************************************************/
-
-void __init config_BSP(char *commandp, int size)
-{
-#if !defined(CONFIG_BOOTPARAM)
-       /* Copy command line from FLASH to local buffer... */
-       memcpy(commandp, (char *) 0x4000, 4);
-       if(strncmp(commandp, "kcl ", 4) == 0){
-               memcpy(commandp, (char *) 0x4004, size);
-               commandp[size-1] = 0;
-       } else {
-               memset(commandp, 0, size);
-       }
-#endif
-       mach_sched_init = hw_timer_init;
-       m532x_clk_init();
-       m532x_uarts_init();
-       m532x_fec_init();
-#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
-       m532x_qspi_init();
-#endif
-
-#ifdef CONFIG_BDM_DISABLE
-       /*
-        * Disable the BDM clocking.  This also turns off most of the rest of
-        * the BDM device.  This is good for EMC reasons. This option is not
-        * incompatible with the memory protection option.
-        */
-       wdebug(MCFDEBUG_CSR, MCFDEBUG_CSR_PSTCLK);
-#endif
-}
-
-/***************************************************************************/
-/* Board initialization */
-/***************************************************************************/
-/* 
- * PLL min/max specifications
- */
-#define MAX_FVCO       500000  /* KHz */
-#define MAX_FSYS       80000   /* KHz */
-#define MIN_FSYS       58333   /* KHz */
-#define FREF           16000   /* KHz */
-
-
-#define MAX_MFD                135     /* Multiplier */
-#define MIN_MFD                88      /* Multiplier */
-#define BUSDIV         6       /* Divider */
-
-/*
- * Low Power Divider specifications
- */
-#define MIN_LPD                (1 << 0)    /* Divider (not encoded) */
-#define MAX_LPD                (1 << 15)   /* Divider (not encoded) */
-#define DEFAULT_LPD    (1 << 1)        /* Divider (not encoded) */
-
-#define SYS_CLK_KHZ    80000
-#define SYSTEM_PERIOD  12.5
-/*
- *  SDRAM Timing Parameters
- */  
-#define SDRAM_BL       8       /* # of beats in a burst */
-#define SDRAM_TWR      2       /* in clocks */
-#define SDRAM_CASL     2.5     /* CASL in clocks */
-#define SDRAM_TRCD     2       /* in clocks */
-#define SDRAM_TRP      2       /* in clocks */
-#define SDRAM_TRFC     7       /* in clocks */
-#define SDRAM_TREFI    7800    /* in ns */
-
-#define EXT_SRAM_ADDRESS       (0xC0000000)
-#define FLASH_ADDRESS          (0x00000000)
-#define SDRAM_ADDRESS          (0x40000000)
-
-#define NAND_FLASH_ADDRESS     (0xD0000000)
-
-int sys_clk_khz = 0;
-int sys_clk_mhz = 0;
-
-void wtm_init(void);
-void scm_init(void);
-void gpio_init(void);
-void fbcs_init(void);
-void sdramc_init(void);
-int  clock_pll (int fsys, int flags);
-int  clock_limp (int);
-int  clock_exit_limp (void);
-int  get_sys_clock (void);
-
-asmlinkage void __init sysinit(void)
-{
-       sys_clk_khz = clock_pll(0, 0);
-       sys_clk_mhz = sys_clk_khz/1000;
-       
-       wtm_init();
-       scm_init();
-       gpio_init();
-       fbcs_init();
-       sdramc_init();
-}
-
-void wtm_init(void)
-{
-       /* Disable watchdog timer */
-       writew(0, MCF_WTM_WCR);
-}
-
-#define MCF_SCM_BCR_GBW                (0x00000100)
-#define MCF_SCM_BCR_GBR                (0x00000200)
-
-void scm_init(void)
-{
-       /* All masters are trusted */
-       writel(0x77777777, MCF_SCM_MPR);
-    
-       /* Allow supervisor/user, read/write, and trusted/untrusted
-          access to all slaves */
-       writel(0, MCF_SCM_PACRA);
-       writel(0, MCF_SCM_PACRB);
-       writel(0, MCF_SCM_PACRC);
-       writel(0, MCF_SCM_PACRD);
-       writel(0, MCF_SCM_PACRE);
-       writel(0, MCF_SCM_PACRF);
-
-       /* Enable bursts */
-       writel(MCF_SCM_BCR_GBR | MCF_SCM_BCR_GBW, MCF_SCM_BCR);
-}
-
-
-void fbcs_init(void)
-{
-       writeb(0x3E, MCFGPIO_PAR_CS);
-
-       /* Latch chip select */
-       writel(0x10080000, MCF_FBCS1_CSAR);
-
-       writel(0x002A3780, MCF_FBCS1_CSCR);
-       writel(MCF_FBCS_CSMR_BAM_2M | MCF_FBCS_CSMR_V, MCF_FBCS1_CSMR);
-
-       /* Initialize latch to drive signals to inactive states */
-       writew(0xffff, 0x10080000);
-
-       /* External SRAM */
-       writel(EXT_SRAM_ADDRESS, MCF_FBCS1_CSAR);
-       writel(MCF_FBCS_CSCR_PS_16 |
-               MCF_FBCS_CSCR_AA |
-               MCF_FBCS_CSCR_SBM |
-               MCF_FBCS_CSCR_WS(1),
-               MCF_FBCS1_CSCR);
-       writel(MCF_FBCS_CSMR_BAM_512K | MCF_FBCS_CSMR_V, MCF_FBCS1_CSMR);
-
-       /* Boot Flash connected to FBCS0 */
-       writel(FLASH_ADDRESS, MCF_FBCS0_CSAR);
-       writel(MCF_FBCS_CSCR_PS_16 |
-               MCF_FBCS_CSCR_BEM |
-               MCF_FBCS_CSCR_AA |
-               MCF_FBCS_CSCR_SBM |
-               MCF_FBCS_CSCR_WS(7),
-               MCF_FBCS0_CSCR);
-       writel(MCF_FBCS_CSMR_BAM_32M | MCF_FBCS_CSMR_V, MCF_FBCS0_CSMR);
-}
-
-void sdramc_init(void)
-{
-       /*
-        * Check to see if the SDRAM has already been initialized
-        * by a run control tool
-        */
-       if (!(readl(MCF_SDRAMC_SDCR) & MCF_SDRAMC_SDCR_REF)) {
-               /* SDRAM chip select initialization */
-               
-               /* Initialize SDRAM chip select */
-               writel(MCF_SDRAMC_SDCS_BA(SDRAM_ADDRESS) |
-                       MCF_SDRAMC_SDCS_CSSZ(MCF_SDRAMC_SDCS_CSSZ_32MBYTE),
-                       MCF_SDRAMC_SDCS0);
-
-       /*
-        * Basic configuration and initialization
-        */
-       writel(MCF_SDRAMC_SDCFG1_SRD2RW((int)((SDRAM_CASL + 2) + 0.5)) |
-               MCF_SDRAMC_SDCFG1_SWT2RD(SDRAM_TWR + 1) |
-               MCF_SDRAMC_SDCFG1_RDLAT((int)((SDRAM_CASL * 2) + 2)) |
-               MCF_SDRAMC_SDCFG1_ACT2RW((int)(SDRAM_TRCD + 0.5)) |
-               MCF_SDRAMC_SDCFG1_PRE2ACT((int)(SDRAM_TRP + 0.5)) |
-               MCF_SDRAMC_SDCFG1_REF2ACT((int)(SDRAM_TRFC + 0.5)) |
-               MCF_SDRAMC_SDCFG1_WTLAT(3),
-               MCF_SDRAMC_SDCFG1);
-       writel(MCF_SDRAMC_SDCFG2_BRD2PRE(SDRAM_BL / 2 + 1) |
-               MCF_SDRAMC_SDCFG2_BWT2RW(SDRAM_BL / 2 + SDRAM_TWR) |
-               MCF_SDRAMC_SDCFG2_BRD2WT((int)((SDRAM_CASL + SDRAM_BL / 2 - 1.0) + 0.5)) |
-               MCF_SDRAMC_SDCFG2_BL(SDRAM_BL - 1),
-               MCF_SDRAMC_SDCFG2);
-
-            
-       /*
-        * Precharge and enable write to SDMR
-        */
-       writel(MCF_SDRAMC_SDCR_MODE_EN |
-               MCF_SDRAMC_SDCR_CKE |
-               MCF_SDRAMC_SDCR_DDR |
-               MCF_SDRAMC_SDCR_MUX(1) |
-               MCF_SDRAMC_SDCR_RCNT((int)(((SDRAM_TREFI / (SYSTEM_PERIOD * 64)) - 1) + 0.5)) |
-               MCF_SDRAMC_SDCR_PS_16 |
-               MCF_SDRAMC_SDCR_IPALL,
-               MCF_SDRAMC_SDCR);
-
-       /*
-        * Write extended mode register
-        */
-       writel(MCF_SDRAMC_SDMR_BNKAD_LEMR |
-               MCF_SDRAMC_SDMR_AD(0x0) |
-               MCF_SDRAMC_SDMR_CMD,
-               MCF_SDRAMC_SDMR);
-
-       /*
-        * Write mode register and reset DLL
-        */
-       writel(MCF_SDRAMC_SDMR_BNKAD_LMR |
-               MCF_SDRAMC_SDMR_AD(0x163) |
-               MCF_SDRAMC_SDMR_CMD,
-               MCF_SDRAMC_SDMR);
-
-       /*
-        * Execute a PALL command
-        */
-       writel(readl(MCF_SDRAMC_SDCR) | MCF_SDRAMC_SDCR_IPALL, MCF_SDRAMC_SDCR);
-
-       /*
-        * Perform two REF cycles
-        */
-       writel(readl(MCF_SDRAMC_SDCR) | MCF_SDRAMC_SDCR_IREF, MCF_SDRAMC_SDCR);
-       writel(readl(MCF_SDRAMC_SDCR) | MCF_SDRAMC_SDCR_IREF, MCF_SDRAMC_SDCR);
-
-       /*
-        * Write mode register and clear reset DLL
-        */
-       writel(MCF_SDRAMC_SDMR_BNKAD_LMR |
-               MCF_SDRAMC_SDMR_AD(0x063) |
-               MCF_SDRAMC_SDMR_CMD,
-               MCF_SDRAMC_SDMR);
-                               
-       /*
-        * Enable auto refresh and lock SDMR
-        */
-       writel(readl(MCF_SDRAMC_SDCR) & ~MCF_SDRAMC_SDCR_MODE_EN,
-               MCF_SDRAMC_SDCR);
-       writel(MCF_SDRAMC_SDCR_REF | MCF_SDRAMC_SDCR_DQS_OE(0xC),
-               MCF_SDRAMC_SDCR);
-       }
-}
-
-void gpio_init(void)
-{
-       /* Enable UART0 pins */
-       writew(MCF_GPIO_PAR_UART_PAR_URXD0 | MCF_GPIO_PAR_UART_PAR_UTXD0,
-               MCFGPIO_PAR_UART);
-
-       /*
-        * Initialize TIN3 as a GPIO output to enable the write
-        * half of the latch.
-        */
-       writeb(0x00, MCFGPIO_PAR_TIMER);
-       writeb(0x08, MCFGPIO_PDDR_TIMER);
-       writeb(0x00, MCFGPIO_PCLRR_TIMER);
-}
-
-int clock_pll(int fsys, int flags)
-{
-       int fref, temp, fout, mfd;
-       u32 i;
-
-       fref = FREF;
-        
-       if (fsys == 0) {
-               /* Return current PLL output */
-               mfd = readb(MCF_PLL_PFDR);
-
-               return (fref * mfd / (BUSDIV * 4));
-       }
-
-       /* Check bounds of requested system clock */
-       if (fsys > MAX_FSYS)
-               fsys = MAX_FSYS;
-       if (fsys < MIN_FSYS)
-               fsys = MIN_FSYS;
-
-       /* Multiplying by 100 when calculating the temp value,
-          and then dividing by 100 to calculate the mfd allows
-          for exact values without needing to include floating
-          point libraries. */
-       temp = 100 * fsys / fref;
-       mfd = 4 * BUSDIV * temp / 100;
-                       
-       /* Determine the output frequency for selected values */
-       fout = (fref * mfd / (BUSDIV * 4));
-
-       /*
-        * Check to see if the SDRAM has already been initialized.
-        * If it has then the SDRAM needs to be put into self refresh
-        * mode before reprogramming the PLL.
-        */
-       if (readl(MCF_SDRAMC_SDCR) & MCF_SDRAMC_SDCR_REF)
-               /* Put SDRAM into self refresh mode */
-               writel(readl(MCF_SDRAMC_SDCR) & ~MCF_SDRAMC_SDCR_CKE,
-                       MCF_SDRAMC_SDCR);
-
-       /*
-        * Initialize the PLL to generate the new system clock frequency.
-        * The device must be put into LIMP mode to reprogram the PLL.
-        */
-
-       /* Enter LIMP mode */
-       clock_limp(DEFAULT_LPD);
-                                       
-       /* Reprogram PLL for desired fsys */
-       writeb(MCF_PLL_PODR_CPUDIV(BUSDIV/3) | MCF_PLL_PODR_BUSDIV(BUSDIV),
-               MCF_PLL_PODR);
-                                               
-       writeb(mfd, MCF_PLL_PFDR);
-               
-       /* Exit LIMP mode */
-       clock_exit_limp();
-       
-       /*
-        * Return the SDRAM to normal operation if it is in use.
-        */
-       if (readl(MCF_SDRAMC_SDCR) & MCF_SDRAMC_SDCR_REF)
-               /* Exit self refresh mode */
-               writel(readl(MCF_SDRAMC_SDCR) | MCF_SDRAMC_SDCR_CKE,
-                       MCF_SDRAMC_SDCR);
-
-       /* Errata - workaround for SDRAM opeartion after exiting LIMP mode */
-       writel(MCF_SDRAMC_REFRESH, MCF_SDRAMC_LIMP_FIX);
-
-       /* wait for DQS logic to relock */
-       for (i = 0; i < 0x200; i++)
-               ;
-
-       return fout;
-}
-
-int clock_limp(int div)
-{
-       u32 temp;
-
-       /* Check bounds of divider */
-       if (div < MIN_LPD)
-               div = MIN_LPD;
-       if (div > MAX_LPD)
-               div = MAX_LPD;
-    
-       /* Save of the current value of the SSIDIV so we don't
-          overwrite the value*/
-       temp = readw(MCF_CCM_CDR) & MCF_CCM_CDR_SSIDIV(0xF);
-      
-       /* Apply the divider to the system clock */
-       writew(MCF_CCM_CDR_LPDIV(div) | MCF_CCM_CDR_SSIDIV(temp), MCF_CCM_CDR);
-    
-       writew(readw(MCF_CCM_MISCCR) | MCF_CCM_MISCCR_LIMP, MCF_CCM_MISCCR);
-    
-       return (FREF/(3*(1 << div)));
-}
-
-int clock_exit_limp(void)
-{
-       int fout;
-       
-       /* Exit LIMP mode */
-       writew(readw(MCF_CCM_MISCCR) & ~MCF_CCM_MISCCR_LIMP, MCF_CCM_MISCCR);
-
-       /* Wait for PLL to lock */
-       while (!(readw(MCF_CCM_MISCCR) & MCF_CCM_MISCCR_PLL_LOCK))
-               ;
-       
-       fout = get_sys_clock();
-
-       return fout;
-}
-
-int get_sys_clock(void)
-{
-       int divider;
-       
-       /* Test to see if device is in LIMP mode */
-       if (readw(MCF_CCM_MISCCR) & MCF_CCM_MISCCR_LIMP) {
-               divider = readw(MCF_CCM_CDR) & MCF_CCM_CDR_LPDIV(0xF);
-               return (FREF/(2 << divider));
-       }
-       else
-               return (FREF * readb(MCF_PLL_PFDR)) / (BUSDIV * 4);
-}
diff --git a/arch/m68k/platform/coldfire/m53xx.c b/arch/m68k/platform/coldfire/m53xx.c
new file mode 100644 (file)
index 0000000..5286f98
--- /dev/null
@@ -0,0 +1,592 @@
+/***************************************************************************/
+
+/*
+ *     m53xx.c -- platform support for ColdFire 53xx based boards
+ *
+ *     Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
+ *     Copyright (C) 2000, Lineo (www.lineo.com)
+ *     Yaroslav Vinogradov yaroslav.vinogradov@freescale.com
+ *     Copyright Freescale Semiconductor, Inc 2006
+ *     Copyright (c) 2006, emlix, Sebastian Hess <shess@hessware.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/***************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <asm/machdep.h>
+#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
+#include <asm/mcfuart.h>
+#include <asm/mcfdma.h>
+#include <asm/mcfwdebug.h>
+#include <asm/mcfclk.h>
+
+/***************************************************************************/
+
+DEFINE_CLK(0, "flexbus", 2, MCF_CLK);
+DEFINE_CLK(0, "mcfcan.0", 8, MCF_CLK);
+DEFINE_CLK(0, "fec.0", 12, MCF_CLK);
+DEFINE_CLK(0, "edma", 17, MCF_CLK);
+DEFINE_CLK(0, "intc.0", 18, MCF_CLK);
+DEFINE_CLK(0, "intc.1", 19, MCF_CLK);
+DEFINE_CLK(0, "iack.0", 21, MCF_CLK);
+DEFINE_CLK(0, "mcfi2c.0", 22, MCF_CLK);
+DEFINE_CLK(0, "mcfqspi.0", 23, MCF_CLK);
+DEFINE_CLK(0, "mcfuart.0", 24, MCF_BUSCLK);
+DEFINE_CLK(0, "mcfuart.1", 25, MCF_BUSCLK);
+DEFINE_CLK(0, "mcfuart.2", 26, MCF_BUSCLK);
+DEFINE_CLK(0, "mcftmr.0", 28, MCF_CLK);
+DEFINE_CLK(0, "mcftmr.1", 29, MCF_CLK);
+DEFINE_CLK(0, "mcftmr.2", 30, MCF_CLK);
+DEFINE_CLK(0, "mcftmr.3", 31, MCF_CLK);
+
+DEFINE_CLK(0, "mcfpit.0", 32, MCF_CLK);
+DEFINE_CLK(0, "mcfpit.1", 33, MCF_CLK);
+DEFINE_CLK(0, "mcfpit.2", 34, MCF_CLK);
+DEFINE_CLK(0, "mcfpit.3", 35, MCF_CLK);
+DEFINE_CLK(0, "mcfpwm.0", 36, MCF_CLK);
+DEFINE_CLK(0, "mcfeport.0", 37, MCF_CLK);
+DEFINE_CLK(0, "mcfwdt.0", 38, MCF_CLK);
+DEFINE_CLK(0, "sys.0", 40, MCF_BUSCLK);
+DEFINE_CLK(0, "gpio.0", 41, MCF_BUSCLK);
+DEFINE_CLK(0, "mcfrtc.0", 42, MCF_CLK);
+DEFINE_CLK(0, "mcflcd.0", 43, MCF_CLK);
+DEFINE_CLK(0, "mcfusb-otg.0", 44, MCF_CLK);
+DEFINE_CLK(0, "mcfusb-host.0", 45, MCF_CLK);
+DEFINE_CLK(0, "sdram.0", 46, MCF_CLK);
+DEFINE_CLK(0, "ssi.0", 47, MCF_CLK);
+DEFINE_CLK(0, "pll.0", 48, MCF_CLK);
+
+DEFINE_CLK(1, "mdha.0", 32, MCF_CLK);
+DEFINE_CLK(1, "skha.0", 33, MCF_CLK);
+DEFINE_CLK(1, "rng.0", 34, MCF_CLK);
+
+struct clk *mcf_clks[] = {
+       &__clk_0_2,     /* flexbus */
+       &__clk_0_8,     /* mcfcan.0 */
+       &__clk_0_12,    /* fec.0 */
+       &__clk_0_17,    /* edma */
+       &__clk_0_18,    /* intc.0 */
+       &__clk_0_19,    /* intc.1 */
+       &__clk_0_21,    /* iack.0 */
+       &__clk_0_22,    /* mcfi2c.0 */
+       &__clk_0_23,    /* mcfqspi.0 */
+       &__clk_0_24,    /* mcfuart.0 */
+       &__clk_0_25,    /* mcfuart.1 */
+       &__clk_0_26,    /* mcfuart.2 */
+       &__clk_0_28,    /* mcftmr.0 */
+       &__clk_0_29,    /* mcftmr.1 */
+       &__clk_0_30,    /* mcftmr.2 */
+       &__clk_0_31,    /* mcftmr.3 */
+
+       &__clk_0_32,    /* mcfpit.0 */
+       &__clk_0_33,    /* mcfpit.1 */
+       &__clk_0_34,    /* mcfpit.2 */
+       &__clk_0_35,    /* mcfpit.3 */
+       &__clk_0_36,    /* mcfpwm.0 */
+       &__clk_0_37,    /* mcfeport.0 */
+       &__clk_0_38,    /* mcfwdt.0 */
+       &__clk_0_40,    /* sys.0 */
+       &__clk_0_41,    /* gpio.0 */
+       &__clk_0_42,    /* mcfrtc.0 */
+       &__clk_0_43,    /* mcflcd.0 */
+       &__clk_0_44,    /* mcfusb-otg.0 */
+       &__clk_0_45,    /* mcfusb-host.0 */
+       &__clk_0_46,    /* sdram.0 */
+       &__clk_0_47,    /* ssi.0 */
+       &__clk_0_48,    /* pll.0 */
+
+       &__clk_1_32,    /* mdha.0 */
+       &__clk_1_33,    /* skha.0 */
+       &__clk_1_34,    /* rng.0 */
+       NULL,
+};
+
+static struct clk * const enable_clks[] __initconst = {
+       &__clk_0_2,     /* flexbus */
+       &__clk_0_18,    /* intc.0 */
+       &__clk_0_19,    /* intc.1 */
+       &__clk_0_21,    /* iack.0 */
+       &__clk_0_24,    /* mcfuart.0 */
+       &__clk_0_25,    /* mcfuart.1 */
+       &__clk_0_26,    /* mcfuart.2 */
+       &__clk_0_28,    /* mcftmr.0 */
+       &__clk_0_29,    /* mcftmr.1 */
+       &__clk_0_32,    /* mcfpit.0 */
+       &__clk_0_33,    /* mcfpit.1 */
+       &__clk_0_37,    /* mcfeport.0 */
+       &__clk_0_40,    /* sys.0 */
+       &__clk_0_41,    /* gpio.0 */
+       &__clk_0_46,    /* sdram.0 */
+       &__clk_0_48,    /* pll.0 */
+};
+
+static struct clk * const disable_clks[] __initconst = {
+       &__clk_0_8,     /* mcfcan.0 */
+       &__clk_0_12,    /* fec.0 */
+       &__clk_0_17,    /* edma */
+       &__clk_0_22,    /* mcfi2c.0 */
+       &__clk_0_23,    /* mcfqspi.0 */
+       &__clk_0_30,    /* mcftmr.2 */
+       &__clk_0_31,    /* mcftmr.3 */
+       &__clk_0_34,    /* mcfpit.2 */
+       &__clk_0_35,    /* mcfpit.3 */
+       &__clk_0_36,    /* mcfpwm.0 */
+       &__clk_0_38,    /* mcfwdt.0 */
+       &__clk_0_42,    /* mcfrtc.0 */
+       &__clk_0_43,    /* mcflcd.0 */
+       &__clk_0_44,    /* mcfusb-otg.0 */
+       &__clk_0_45,    /* mcfusb-host.0 */
+       &__clk_0_47,    /* ssi.0 */
+       &__clk_1_32,    /* mdha.0 */
+       &__clk_1_33,    /* skha.0 */
+       &__clk_1_34,    /* rng.0 */
+};
+
+
+static void __init m53xx_clk_init(void)
+{
+       unsigned i;
+
+       /* make sure these clocks are enabled */
+       for (i = 0; i < ARRAY_SIZE(enable_clks); ++i)
+               __clk_init_enabled(enable_clks[i]);
+       /* make sure these clocks are disabled */
+       for (i = 0; i < ARRAY_SIZE(disable_clks); ++i)
+               __clk_init_disabled(disable_clks[i]);
+}
+
+/***************************************************************************/
+
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
+
+static void __init m53xx_qspi_init(void)
+{
+       /* set up the QSPI pins, using GPIO for chip select control */
+       writew(0x01f0, MCFGPIO_PAR_QSPI);
+}
+
+#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */
+
+/***************************************************************************/
+
+static void __init m53xx_uarts_init(void)
+{
+       /* UART GPIO initialization */
+       writew(readw(MCFGPIO_PAR_UART) | 0x0FFF, MCFGPIO_PAR_UART);
+}
+
+/***************************************************************************/
+
+static void __init m53xx_fec_init(void)
+{
+       u8 v;
+
+       /* Set multi-function pins to ethernet mode for fec0 */
+       v = readb(MCFGPIO_PAR_FECI2C);
+       v |= MCF_GPIO_PAR_FECI2C_PAR_MDC_EMDC |
+               MCF_GPIO_PAR_FECI2C_PAR_MDIO_EMDIO;
+       writeb(v, MCFGPIO_PAR_FECI2C);
+
+       v = readb(MCFGPIO_PAR_FEC);
+       v = MCF_GPIO_PAR_FEC_PAR_FEC_7W_FEC | MCF_GPIO_PAR_FEC_PAR_FEC_MII_FEC;
+       writeb(v, MCFGPIO_PAR_FEC);
+}
+
+/***************************************************************************/
+
+void __init config_BSP(char *commandp, int size)
+{
+#if !defined(CONFIG_BOOTPARAM)
+       /* Copy command line from FLASH to local buffer... */
+       memcpy(commandp, (char *) 0x4000, 4);
+       if (strncmp(commandp, "kcl ", 4) == 0) {
+               memcpy(commandp, (char *) 0x4004, size);
+               commandp[size-1] = 0;
+       } else {
+               memset(commandp, 0, size);
+       }
+#endif
+       mach_sched_init = hw_timer_init;
+       m53xx_clk_init();
+       m53xx_uarts_init();
+       m53xx_fec_init();
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
+       m53xx_qspi_init();
+#endif
+
+#ifdef CONFIG_BDM_DISABLE
+       /*
+        * Disable the BDM clocking.  This also turns off most of the rest of
+        * the BDM device.  This is good for EMC reasons.  This option does
+        * not conflict with the memory protection option.
+        */
+       wdebug(MCFDEBUG_CSR, MCFDEBUG_CSR_PSTCLK);
+#endif
+}
+
+/***************************************************************************/
+/* Board initialization */
+/***************************************************************************/
+/* 
+ * PLL min/max specifications
+ */
+#define MAX_FVCO       500000  /* kHz */
+#define MAX_FSYS       80000   /* kHz */
+#define MIN_FSYS       58333   /* kHz */
+#define FREF           16000   /* kHz */
+
+
+#define MAX_MFD                135     /* Multiplier */
+#define MIN_MFD                88      /* Multiplier */
+#define BUSDIV         6       /* Divider */
+
+/*
+ * Low Power Divider specifications
+ */
+#define MIN_LPD                (1 << 0)    /* Divider (not encoded) */
+#define MAX_LPD                (1 << 15)   /* Divider (not encoded) */
+#define DEFAULT_LPD    (1 << 1)        /* Divider (not encoded) */
+
+#define SYS_CLK_KHZ    80000
+#define SYSTEM_PERIOD  12.5    /* ns, 1 / SYS_CLK_KHZ */
+/*
+ *  SDRAM Timing Parameters
+ */  
+#define SDRAM_BL       8       /* # of beats in a burst */
+#define SDRAM_TWR      2       /* in clocks */
+#define SDRAM_CASL     2.5     /* CASL in clocks */
+#define SDRAM_TRCD     2       /* in clocks */
+#define SDRAM_TRP      2       /* in clocks */
+#define SDRAM_TRFC     7       /* in clocks */
+#define SDRAM_TREFI    7800    /* in ns */
+
+#define EXT_SRAM_ADDRESS       (0xC0000000)
+#define FLASH_ADDRESS          (0x00000000)
+#define SDRAM_ADDRESS          (0x40000000)
+
+#define NAND_FLASH_ADDRESS     (0xD0000000)
+
+int sys_clk_khz = 0;
+int sys_clk_mhz = 0;
+
+void wtm_init(void);
+void scm_init(void);
+void gpio_init(void);
+void fbcs_init(void);
+void sdramc_init(void);
+int  clock_pll (int fsys, int flags);
+int  clock_limp (int);
+int  clock_exit_limp (void);
+int  get_sys_clock (void);
+
+asmlinkage void __init sysinit(void)
+{
+       sys_clk_khz = clock_pll(0, 0);
+       sys_clk_mhz = sys_clk_khz/1000;
+       
+       wtm_init();
+       scm_init();
+       gpio_init();
+       fbcs_init();
+       sdramc_init();
+}
+
+void wtm_init(void)
+{
+       /* Disable watchdog timer */
+       writew(0, MCF_WTM_WCR);
+}
+
+#define MCF_SCM_BCR_GBW                (0x00000100)
+#define MCF_SCM_BCR_GBR                (0x00000200)
+
+void scm_init(void)
+{
+       /* All masters are trusted */
+       writel(0x77777777, MCF_SCM_MPR);
+    
+       /* Allow supervisor/user, read/write, and trusted/untrusted
+          access to all slaves */
+       writel(0, MCF_SCM_PACRA);
+       writel(0, MCF_SCM_PACRB);
+       writel(0, MCF_SCM_PACRC);
+       writel(0, MCF_SCM_PACRD);
+       writel(0, MCF_SCM_PACRE);
+       writel(0, MCF_SCM_PACRF);
+
+       /* Enable bursts */
+       writel(MCF_SCM_BCR_GBR | MCF_SCM_BCR_GBW, MCF_SCM_BCR);
+}
+
+
+void fbcs_init(void)
+{
+       writeb(0x3E, MCFGPIO_PAR_CS);
+
+       /* Latch chip select */
+       writel(0x10080000, MCF_FBCS1_CSAR);
+
+       writel(0x002A3780, MCF_FBCS1_CSCR);
+       writel(MCF_FBCS_CSMR_BAM_2M | MCF_FBCS_CSMR_V, MCF_FBCS1_CSMR);
+
+       /* Initialize latch to drive signals to inactive states */
+       writew(0xffff, 0x10080000);
+
+       /* External SRAM */
+       writel(EXT_SRAM_ADDRESS, MCF_FBCS1_CSAR);
+       writel(MCF_FBCS_CSCR_PS_16 |
+               MCF_FBCS_CSCR_AA |
+               MCF_FBCS_CSCR_SBM |
+               MCF_FBCS_CSCR_WS(1),
+               MCF_FBCS1_CSCR);
+       writel(MCF_FBCS_CSMR_BAM_512K | MCF_FBCS_CSMR_V, MCF_FBCS1_CSMR);
+
+       /* Boot Flash connected to FBCS0 */
+       writel(FLASH_ADDRESS, MCF_FBCS0_CSAR);
+       writel(MCF_FBCS_CSCR_PS_16 |
+               MCF_FBCS_CSCR_BEM |
+               MCF_FBCS_CSCR_AA |
+               MCF_FBCS_CSCR_SBM |
+               MCF_FBCS_CSCR_WS(7),
+               MCF_FBCS0_CSCR);
+       writel(MCF_FBCS_CSMR_BAM_32M | MCF_FBCS_CSMR_V, MCF_FBCS0_CSMR);
+}
+
+void sdramc_init(void)
+{
+       /*
+        * Check to see if the SDRAM has already been initialized
+        * by a run control tool
+        */
+       if (!(readl(MCF_SDRAMC_SDCR) & MCF_SDRAMC_SDCR_REF)) {
+               /* SDRAM chip select initialization */
+               
+               /* Initialize SDRAM chip select */
+               writel(MCF_SDRAMC_SDCS_BA(SDRAM_ADDRESS) |
+                       MCF_SDRAMC_SDCS_CSSZ(MCF_SDRAMC_SDCS_CSSZ_32MBYTE),
+                       MCF_SDRAMC_SDCS0);
+
+       /*
+        * Basic configuration and initialization
+        */
+       writel(MCF_SDRAMC_SDCFG1_SRD2RW((int)((SDRAM_CASL + 2) + 0.5)) |
+               MCF_SDRAMC_SDCFG1_SWT2RD(SDRAM_TWR + 1) |
+               MCF_SDRAMC_SDCFG1_RDLAT((int)((SDRAM_CASL * 2) + 2)) |
+               MCF_SDRAMC_SDCFG1_ACT2RW((int)(SDRAM_TRCD + 0.5)) |
+               MCF_SDRAMC_SDCFG1_PRE2ACT((int)(SDRAM_TRP + 0.5)) |
+               MCF_SDRAMC_SDCFG1_REF2ACT((int)(SDRAM_TRFC + 0.5)) |
+               MCF_SDRAMC_SDCFG1_WTLAT(3),
+               MCF_SDRAMC_SDCFG1);
+       writel(MCF_SDRAMC_SDCFG2_BRD2PRE(SDRAM_BL / 2 + 1) |
+               MCF_SDRAMC_SDCFG2_BWT2RW(SDRAM_BL / 2 + SDRAM_TWR) |
+               MCF_SDRAMC_SDCFG2_BRD2WT((int)((SDRAM_CASL + SDRAM_BL / 2 - 1.0) + 0.5)) |
+               MCF_SDRAMC_SDCFG2_BL(SDRAM_BL - 1),
+               MCF_SDRAMC_SDCFG2);
+
+            
+       /*
+        * Precharge and enable write to SDMR
+        */
+       writel(MCF_SDRAMC_SDCR_MODE_EN |
+               MCF_SDRAMC_SDCR_CKE |
+               MCF_SDRAMC_SDCR_DDR |
+               MCF_SDRAMC_SDCR_MUX(1) |
+               MCF_SDRAMC_SDCR_RCNT((int)(((SDRAM_TREFI / (SYSTEM_PERIOD * 64)) - 1) + 0.5)) |
+               MCF_SDRAMC_SDCR_PS_16 |
+               MCF_SDRAMC_SDCR_IPALL,
+               MCF_SDRAMC_SDCR);
+
+       /*
+        * Write extended mode register
+        */
+       writel(MCF_SDRAMC_SDMR_BNKAD_LEMR |
+               MCF_SDRAMC_SDMR_AD(0x0) |
+               MCF_SDRAMC_SDMR_CMD,
+               MCF_SDRAMC_SDMR);
+
+       /*
+        * Write mode register and reset DLL
+        */
+       writel(MCF_SDRAMC_SDMR_BNKAD_LMR |
+               MCF_SDRAMC_SDMR_AD(0x163) |
+               MCF_SDRAMC_SDMR_CMD,
+               MCF_SDRAMC_SDMR);
+
+       /*
+        * Execute a PALL command
+        */
+       writel(readl(MCF_SDRAMC_SDCR) | MCF_SDRAMC_SDCR_IPALL, MCF_SDRAMC_SDCR);
+
+       /*
+        * Perform two REF cycles
+        */
+       writel(readl(MCF_SDRAMC_SDCR) | MCF_SDRAMC_SDCR_IREF, MCF_SDRAMC_SDCR);
+       writel(readl(MCF_SDRAMC_SDCR) | MCF_SDRAMC_SDCR_IREF, MCF_SDRAMC_SDCR);
+
+       /*
+        * Write mode register and clear reset DLL
+        */
+       writel(MCF_SDRAMC_SDMR_BNKAD_LMR |
+               MCF_SDRAMC_SDMR_AD(0x063) |
+               MCF_SDRAMC_SDMR_CMD,
+               MCF_SDRAMC_SDMR);
+                               
+       /*
+        * Enable auto refresh and lock SDMR
+        */
+       writel(readl(MCF_SDRAMC_SDCR) & ~MCF_SDRAMC_SDCR_MODE_EN,
+               MCF_SDRAMC_SDCR);
+       writel(MCF_SDRAMC_SDCR_REF | MCF_SDRAMC_SDCR_DQS_OE(0xC),
+               MCF_SDRAMC_SDCR);
+       }
+}
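
For reference, the MCF_SDRAMC_SDCR_RCNT() argument above reduces to a small integer once the constants defined earlier in this file are substituted (SDRAM_TREFI = 7800 ns, SYSTEM_PERIOD = 12.5 ns, and the fixed prescaler of 64). The standalone program below is only a sketch of that arithmetic and is not part of the patch:

#include <stdio.h>

int main(void)
{
	/* Same expression as the MCF_SDRAMC_SDCR_RCNT() argument in sdramc_init() */
	int rcnt = (int)(((7800 / (12.5 * 64)) - 1) + 0.5);

	printf("RCNT = %d\n", rcnt);	/* 9.75 - 1 + 0.5 = 9.25, truncated to 9 */
	return 0;
}
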
+
+void gpio_init(void)
+{
+       /* Enable UART0 pins */
+       writew(MCF_GPIO_PAR_UART_PAR_URXD0 | MCF_GPIO_PAR_UART_PAR_UTXD0,
+               MCFGPIO_PAR_UART);
+
+       /*
+        * Initialize TIN3 as a GPIO output to enable the write
+        * half of the latch.
+        */
+       writeb(0x00, MCFGPIO_PAR_TIMER);
+       writeb(0x08, MCFGPIO_PDDR_TIMER);
+       writeb(0x00, MCFGPIO_PCLRR_TIMER);
+}
+
+int clock_pll(int fsys, int flags)
+{
+       int fref, temp, fout, mfd;
+       u32 i;
+
+       fref = FREF;
+        
+       if (fsys == 0) {
+               /* Return current PLL output */
+               mfd = readb(MCF_PLL_PFDR);
+
+               return (fref * mfd / (BUSDIV * 4));
+       }
+
+       /* Check bounds of requested system clock */
+       if (fsys > MAX_FSYS)
+               fsys = MAX_FSYS;
+       if (fsys < MIN_FSYS)
+               fsys = MIN_FSYS;
+
+       /* Multiplying by 100 when calculating the temp value,
+          and then dividing by 100 to calculate the mfd allows
+          for exact values without needing to include floating
+          point libraries. */
+       temp = 100 * fsys / fref;
+       mfd = 4 * BUSDIV * temp / 100;
+                       
+       /* Determine the output frequency for selected values */
+       fout = (fref * mfd / (BUSDIV * 4));
+
+       /*
+        * Check to see if the SDRAM has already been initialized.
+        * If it has then the SDRAM needs to be put into self refresh
+        * mode before reprogramming the PLL.
+        */
+       if (readl(MCF_SDRAMC_SDCR) & MCF_SDRAMC_SDCR_REF)
+               /* Put SDRAM into self refresh mode */
+               writel(readl(MCF_SDRAMC_SDCR) & ~MCF_SDRAMC_SDCR_CKE,
+                       MCF_SDRAMC_SDCR);
+
+       /*
+        * Initialize the PLL to generate the new system clock frequency.
+        * The device must be put into LIMP mode to reprogram the PLL.
+        */
+
+       /* Enter LIMP mode */
+       clock_limp(DEFAULT_LPD);
+                                       
+       /* Reprogram PLL for desired fsys */
+       writeb(MCF_PLL_PODR_CPUDIV(BUSDIV/3) | MCF_PLL_PODR_BUSDIV(BUSDIV),
+               MCF_PLL_PODR);
+                                               
+       writeb(mfd, MCF_PLL_PFDR);
+               
+       /* Exit LIMP mode */
+       clock_exit_limp();
+       
+       /*
+        * Return the SDRAM to normal operation if it is in use.
+        */
+       if (readl(MCF_SDRAMC_SDCR) & MCF_SDRAMC_SDCR_REF)
+               /* Exit self refresh mode */
+               writel(readl(MCF_SDRAMC_SDCR) | MCF_SDRAMC_SDCR_CKE,
+                       MCF_SDRAMC_SDCR);
+
+       /* Errata - workaround for SDRAM operation after exiting LIMP mode */
+       writel(MCF_SDRAMC_REFRESH, MCF_SDRAMC_LIMP_FIX);
+
+       /* wait for DQS logic to relock */
+       for (i = 0; i < 0x200; i++)
+               ;
+
+       return fout;
+}
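
The scale-by-100 trick in clock_pll() is easiest to follow with concrete numbers. Taking the largest supported request, fsys = MAX_FSYS = 80000 kHz, together with FREF = 16000 kHz and BUSDIV = 6 as defined earlier, the standalone sketch below reproduces the integer math (illustration only, not part of the patch):

#include <stdio.h>

int main(void)
{
	int fsys = 80000;			/* requested system clock, kHz */
	int temp = 100 * fsys / 16000;		/* 100 * fsys / FREF         = 500 */
	int mfd  = 4 * 6 * temp / 100;		/* 4 * BUSDIV * temp / 100   = 120 */
	int fout = 16000 * mfd / (6 * 4);	/* FREF * mfd / (BUSDIV * 4) = 80000 kHz */

	printf("mfd = %d, fout = %d kHz\n", mfd, fout);
	return 0;
}

Note that clock_pll() clamps the requested fsys against MIN_FSYS/MAX_FSYS but, as written, never checks the resulting mfd against the MIN_MFD/MAX_MFD limits defined above.
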
+
+int clock_limp(int div)
+{
+       u32 temp;
+
+       /* Check bounds of divider */
+       if (div < MIN_LPD)
+               div = MIN_LPD;
+       if (div > MAX_LPD)
+               div = MAX_LPD;
+    
+       /* Save the current value of the SSIDIV field so we don't
+          overwrite it */
+       temp = readw(MCF_CCM_CDR) & MCF_CCM_CDR_SSIDIV(0xF);
+      
+       /* Apply the divider to the system clock */
+       writew(MCF_CCM_CDR_LPDIV(div) | MCF_CCM_CDR_SSIDIV(temp), MCF_CCM_CDR);
+    
+       writew(readw(MCF_CCM_MISCCR) | MCF_CCM_MISCCR_LIMP, MCF_CCM_MISCCR);
+    
+       return (FREF/(3*(1 << div)));
+}
+
+int clock_exit_limp(void)
+{
+       int fout;
+       
+       /* Exit LIMP mode */
+       writew(readw(MCF_CCM_MISCCR) & ~MCF_CCM_MISCCR_LIMP, MCF_CCM_MISCCR);
+
+       /* Wait for PLL to lock */
+       while (!(readw(MCF_CCM_MISCCR) & MCF_CCM_MISCCR_PLL_LOCK))
+               ;
+       
+       fout = get_sys_clock();
+
+       return fout;
+}
+
+int get_sys_clock(void)
+{
+       int divider;
+       
+       /* Test to see if device is in LIMP mode */
+       if (readw(MCF_CCM_MISCCR) & MCF_CCM_MISCCR_LIMP) {
+               divider = readw(MCF_CCM_CDR) & MCF_CCM_CDR_LPDIV(0xF);
+               return (FREF/(2 << divider));
+       }
+       else
+               return (FREF * readb(MCF_PLL_PFDR)) / (BUSDIV * 4);
+}
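
get_sys_clock() simply reads the hardware back through the same two formulas: FREF * PFDR / (BUSDIV * 4) when the PLL drives the system clock, and FREF / (2 << LPDIV) in LIMP mode. With the purely illustrative register values PFDR = 120 and LPDIV = 1 (assumed, not read from any board), the sketch below shows the resulting rates; it is not part of the patch:

#include <stdio.h>

int main(void)
{
	int pll_khz  = 16000 * 120 / (6 * 4);	/* FREF * PFDR / (BUSDIV * 4) = 80000 kHz */
	int limp_khz = 16000 / (2 << 1);	/* FREF / (2 << LPDIV)        =  4000 kHz */

	printf("PLL mode %d kHz, LIMP mode %d kHz\n", pll_khz, limp_khz);
	return 0;
}
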
index 51f6d2af807f8dd2c401138bc3894b8ae920f34d..d06068e457643fc804f0b068679da230e640becb 100644 (file)
@@ -36,7 +36,7 @@
  */
 void coldfire_profile_init(void);
 
-#if defined(CONFIG_M532x) || defined(CONFIG_M5441x)
+#if defined(CONFIG_M53xx) || defined(CONFIG_M5441x)
 #define        __raw_readtrr   __raw_readl
 #define        __raw_writetrr  __raw_writel
 #else
index d2b097a652d9193f58917f2a256b4e3af09b540c..3649a8b150c0cf46f0161066a262c9c11e81e9a3 100644 (file)
@@ -17,7 +17,6 @@ CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_PARTITION_ADVANCED=y
 # CONFIG_EFI_PARTITION is not set
-CONFIG_OPT_LIB_ASM=y
 CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1
 CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR=1
 CONFIG_XILINX_MICROBLAZE0_USE_BARREL=1
index 41cc841091b084e73692220324d30fd44ac0b018..d52abb6812fabb082c86196d97be0ee90cf728d0 100644 (file)
@@ -153,7 +153,5 @@ extern void __init xilinx_pci_init(void);
 static inline void __init xilinx_pci_init(void) { return; }
 #endif
 
-#include <asm-generic/pci-dma-compat.h>
-
 #endif /* __KERNEL__ */
 #endif /* __ASM_MICROBLAZE_PCI_H */
index a1ab5f0009efcd7bc84d4dd17aabedb915707653..efe59d881789fbafd7db6bd4933263394995da1f 100644 (file)
@@ -90,17 +90,25 @@ static inline int ___range_ok(unsigned long addr, unsigned long size)
 
 #else
 
-/*
- * Address is valid if:
- *  - "addr", "addr + size" and "size" are all below the limit
- */
-#define access_ok(type, addr, size) \
-       (get_fs().seg >= (((unsigned long)(addr)) | \
-               (size) | ((unsigned long)(addr) + (size))))
-
-/* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n",
- type?"WRITE":"READ",addr,size,get_fs().seg)) */
-
+static inline int access_ok(int type, const void __user *addr,
+                                                       unsigned long size)
+{
+       if (!size)
+               goto ok;
+
+       if ((get_fs().seg < ((unsigned long)addr)) ||
+                       (get_fs().seg < ((unsigned long)addr + size - 1))) {
+               pr_debug("ACCESS fail: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
+                       type ? "WRITE" : "READ ", (u32)addr, (u32)size,
+                       (u32)get_fs().seg);
+               return 0;
+       }
+ok:
+       pr_debug("ACCESS OK: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
+                       type ? "WRITE" : "READ ", (u32)addr, (u32)size,
+                       (u32)get_fs().seg);
+       return 1;
+}
 #endif
 
 #ifdef CONFIG_MMU
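
The new inline access_ok() performs essentially the same bounds check as the macro it replaces (the whole user range must stay below the current segment limit) while adding pr_debug tracing and a zero-length fast path. A hypothetical caller is sketched below purely for illustration; demo_read() and its message buffer are invented for this example and are not part of the patch. The explicit check is typically needed when the copy is done with the unchecked __copy_to_user() variant rather than copy_to_user():

#include <linux/fs.h>
#include <linux/uaccess.h>

static ssize_t demo_read(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	static const char msg[] = "hello\n";

	/* Writing to the user buffer, so validate it with VERIFY_WRITE */
	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	if (count > sizeof(msg) - 1)
		count = sizeof(msg) - 1;
	if (__copy_to_user(buf, msg, count))
		return -EFAULT;

	*ppos += count;
	return count;
}
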
index 0b2299bcb94817f59e34399adc20b05e96c0f3bd..410398f6db555a1df3be57620454dc61c47f20fa 100644 (file)
@@ -37,6 +37,8 @@ const struct cpu_ver_key cpu_ver_lookup[] = {
        {"8.20.a", 0x15},
        {"8.20.b", 0x16},
        {"8.30.a", 0x17},
+       {"8.40.a", 0x18},
+       {"8.40.b", 0x19},
        {NULL, 0},
 };
 
@@ -57,6 +59,9 @@ const struct family_string_key family_string_lookup[] = {
        {"virtex6", 0xe},
        /* FIXME There is no key code defined for spartan2 */
        {"spartan2", 0xf0},
+       {"kintex7", 0x10},
+       {"artix7", 0x11},
+       {"zynq7000", 0x12},
        {NULL, 0},
 };
 
index eef84de5e8c83bbb5bbee4c2db4efa6e488ceded..fcc797feb9dbd5df1b654b1a5ace439c35167553 100644 (file)
@@ -112,16 +112,16 @@ no_fdt_arg:
  * copy command line directly to cmd_line placed in data section.
  */
        beqid   r5, skip        /* Skip if NULL pointer */
-       or      r6, r0, r0              /* incremment */
+       or      r11, r0, r0             /* increment */
        ori     r4, r0, cmd_line        /* load address of command line */
        tophys(r4,r4)                   /* convert to phys address */
        ori     r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */
 _copy_command_line:
        /* r2=r5+r6 - r5 contains pointer to command line */
-       lbu             r2, r5, r6
+       lbu     r2, r5, r11
        beqid   r2, skip                /* Skip if no data */
-       sb              r2, r4, r6              /* addr[r4+r6]= r2*/
-       addik   r6, r6, 1               /* increment counting */
+       sb      r2, r4, r11             /* addr[r4 + r11] = r2 */
+       addik   r11, r11, 1             /* increment counting */
        bgtid   r3, _copy_command_line  /* loop for all entries       */
        addik   r3, r3, -1              /* decrement loop */
        addik   r5, r4, 0               /* add new space for command line */
@@ -131,13 +131,13 @@ skip:
 
 #ifdef NOT_COMPILE
 /* save bram context */
-       or      r6, r0, r0                              /* incremment */
+       or      r11, r0, r0                             /* increment */
        ori     r4, r0, TOPHYS(_bram_load_start)        /* save bram context */
        ori     r3, r0, (LMB_SIZE - 4)
 _copy_bram:
-       lw      r7, r0, r6              /* r7 = r0 + r6 */
-       sw      r7, r4, r6              /* addr[r4 + r6] = r7*/
-       addik   r6, r6, 4               /* increment counting */
+       lw      r7, r0, r11             /* r7 = r0 + r11 */
+       sw      r7, r4, r11             /* addr[r4 + r11] = r7 */
+       addik   r11, r11, 4             /* increment counting */
        bgtid   r3, _copy_bram          /* loop for all entries */
        addik   r3, r3, -4              /* decrement loop */
 #endif
@@ -303,8 +303,8 @@ jump_over2:
         * the exception vectors, using a 4k real==virtual mapping.
         */
        /* Use temporary TLB_ID for LMB - clear this temporary mapping later */
-       ori     r6, r0, MICROBLAZE_LMB_TLB_ID
-       mts     rtlbx,r6
+       ori     r11, r0, MICROBLAZE_LMB_TLB_ID
+       mts     rtlbx,r11
 
        ori     r4,r0,(TLB_WR | TLB_EX)
        ori     r3,r0,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K))
index 8778adf72bd36ac8b81ed41c15284596079467a4..d85fa3a2b0f828b6023daf3ba7daaa674fb0d863 100644 (file)
@@ -172,4 +172,6 @@ void __init init_IRQ(void)
         * and commits this patch.  ~~gcl */
        root_domain = irq_domain_add_linear(intc, nr_irq, &xintc_irq_domain_ops,
                                                        (void *)intr_mask);
+
+       irq_set_default_host(root_domain);
 }
index a55893807274cf6964f8a3ffe683b7dfb20e5dcf..7d1a9c8b1f3dfff37cad0e5a87d9372f9030a3e8 100644 (file)
@@ -160,3 +160,8 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
        return 0; /* MicroBlaze has no separate FPU registers */
 }
 #endif /* CONFIG_MMU */
+
+void arch_cpu_idle(void)
+{
+       local_irq_enable();
+}
index 4ec137d13ad793cf9dd36521225547f41d8a21d9..b38ae3acfeb45adbe98f95c05c9c52e42721a215 100644 (file)
@@ -404,10 +404,11 @@ asmlinkage void __init mmu_init(void)
 
 #if defined(CONFIG_BLK_DEV_INITRD)
        /* Remove the init RAM disk from the available memory. */
-/*     if (initrd_start) {
-               mem_pieces_remove(&phys_avail, __pa(initrd_start),
-                                 initrd_end - initrd_start, 1);
-       }*/
+       if (initrd_start) {
+               unsigned long size;
+               size = initrd_end - initrd_start;
+               memblock_reserve(virt_to_phys(initrd_start), size);
+       }
 #endif /* CONFIG_BLK_DEV_INITRD */
 
        /* Initialize the MMU hardware */
index 9ea521e4959ef062e3bae1d1ac31be242d88a5fc..bdb8ea100e73ee8637c8ea587378000292b0b37b 100644 (file)
@@ -30,7 +30,6 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_pci.h>
-#include <linux/pci.h>
 #include <linux/export.h>
 
 #include <asm/processor.h>
index 7dd65cfae83759562e43ab20bb03be071ae56ce5..d2cfe45f332b419b5c463b78eb4f44a40e0386c4 100644 (file)
@@ -17,3 +17,7 @@ obj- := $(platform-)
 obj-y += kernel/
 obj-y += mm/
 obj-y += math-emu/
+
+ifdef CONFIG_KVM
+obj-y += kvm/
+endif
index a90cfc702bb1a31cade6c98d0bd4eb0ead652f8d..7a58ab933b206a397c56b65f6a608fc4f9a1d8eb 100644 (file)
@@ -304,7 +304,6 @@ config MIPS_MALTA
        select HW_HAS_PCI
        select I8253
        select I8259
-       select MIPS_BOARDS_GEN
        select MIPS_BONITO64
        select MIPS_CPU_SCACHE
        select PCI_GT64XXX_PCI0
@@ -335,12 +334,12 @@ config MIPS_SEAD3
        select BOOT_RAW
        select CEVT_R4K
        select CSRC_R4K
+       select CSRC_GIC
        select CPU_MIPSR2_IRQ_VI
        select CPU_MIPSR2_IRQ_EI
        select DMA_NONCOHERENT
        select IRQ_CPU
        select IRQ_GIC
-       select MIPS_BOARDS_GEN
        select MIPS_CPU_SCACHE
        select MIPS_MSC
        select SYS_HAS_CPU_MIPS32_R1
@@ -352,6 +351,7 @@ config MIPS_SEAD3
        select SYS_SUPPORTS_BIG_ENDIAN
        select SYS_SUPPORTS_LITTLE_ENDIAN
        select SYS_SUPPORTS_SMARTMIPS
+       select SYS_SUPPORTS_MICROMIPS
        select USB_ARCH_HAS_EHCI
        select USB_EHCI_BIG_ENDIAN_DESC
        select USB_EHCI_BIG_ENDIAN_MMIO
@@ -910,6 +910,9 @@ config CEVT_GT641XX
 config CEVT_R4K
        bool
 
+config CEVT_GIC
+       bool
+
 config CEVT_SB1250
        bool
 
@@ -982,9 +985,6 @@ config MIPS_MSC
 config MIPS_NILE4
        bool
 
-config MIPS_DISABLE_OBSOLETE_IDE
-       bool
-
 config SYNC_R4K
        bool
 
@@ -1075,9 +1075,6 @@ config IRQ_GT641XX
 config IRQ_GIC
        bool
 
-config MIPS_BOARDS_GEN
-       bool
-
 config PCI_GT64XXX_PCI0
        bool
 
@@ -1147,7 +1144,7 @@ config BOOT_ELF32
 
 config MIPS_L1_CACHE_SHIFT
        int
-       default "4" if MACH_DECSTATION || MIKROTIK_RB532 || PMC_MSP4200_EVAL
+       default "4" if MACH_DECSTATION || MIKROTIK_RB532 || PMC_MSP4200_EVAL || SOC_RT288X
        default "6" if MIPS_CPU_SCACHE
        default "7" if SGI_IP22 || SGI_IP27 || SGI_IP28 || SNI_RM || CPU_CAVIUM_OCTEON
        default "5"
@@ -1236,6 +1233,7 @@ config CPU_MIPS32_R2
        select CPU_HAS_PREFETCH
        select CPU_SUPPORTS_32BIT_KERNEL
        select CPU_SUPPORTS_HIGHMEM
+       select HAVE_KVM
        help
          Choose this option to build a kernel for release 2 or later of the
          MIPS32 architecture.  Most modern embedded systems with a 32-bit
@@ -1736,6 +1734,20 @@ config 64BIT
 
 endchoice
 
+config KVM_GUEST
+       bool "KVM Guest Kernel"
+       help
+         Select this option if building a guest kernel for KVM (Trap & Emulate) mode
+
+config KVM_HOST_FREQ
+       int "KVM Host Processor Frequency (MHz)"
+       depends on KVM_GUEST
+       default 500
+       help
+         Select this option if building a guest kernel for KVM to skip
+         RTC emulation when determining guest CPU frequency.  Instead, the guest
+         processor frequency is automatically derived from the host frequency.
+
 choice
        prompt "Kernel page size"
        default PAGE_SIZE_4KB
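
A guest kernel configuration exercising the two new symbols above might contain the fragment below; the values are only an example (500 MHz happens to be the default and should be replaced by the real host clock):

CONFIG_KVM_GUEST=y
CONFIG_KVM_HOST_FREQ=500
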
@@ -1811,6 +1823,15 @@ config FORCE_MAX_ZONEORDER
          The page size is not necessarily 4KB.  Keep this in mind
          when choosing a value for this option.
 
+config CEVT_GIC
+       bool "Use GIC global counter for clock events"
+       depends on IRQ_GIC && !(MIPS_SEAD3 || MIPS_MT_SMTC)
+       help
+         Use the GIC global counter for the clock events. The R4K clock
+         event driver is always present, so if the platform ends up not
+         detecting a GIC, it will fall back to the R4K timer for the
+         generation of clock events.
+
 config BOARD_SCACHE
        bool
 
@@ -2016,6 +2037,7 @@ config SB1_PASS_2_1_WORKAROUNDS
        depends on CPU_SB1 && CPU_SB1_PASS_2
        default y
 
+
 config 64BIT_PHYS_ADDR
        bool
 
@@ -2034,6 +2056,13 @@ config CPU_HAS_SMARTMIPS
          you don't know you probably don't have SmartMIPS and should say N
          here.
 
+config CPU_MICROMIPS
+       depends on SYS_SUPPORTS_MICROMIPS
+       bool "Build kernel using microMIPS ISA"
+       help
+         When this option is enabled, the kernel will be built using the
+         microMIPS ISA.
+
 config CPU_HAS_WB
        bool
 
@@ -2096,6 +2125,9 @@ config SYS_SUPPORTS_HIGHMEM
 config SYS_SUPPORTS_SMARTMIPS
        bool
 
+config SYS_SUPPORTS_MICROMIPS
+       bool
+
 config ARCH_FLATMEM_ENABLE
        def_bool y
        depends on !NUMA && !CPU_LOONGSON2
@@ -2556,3 +2588,5 @@ source "security/Kconfig"
 source "crypto/Kconfig"
 
 source "lib/Kconfig"
+
+source "arch/mips/kvm/Kconfig"
index 6f7978f95090f5dbdfe5bf761b008becf0df6cb2..dd58a04ef4bca5dc4a6604d283722aeb428e861c 100644 (file)
@@ -114,6 +114,7 @@ cflags-$(CONFIG_CPU_BIG_ENDIAN)             += $(shell $(CC) -dumpmachine |grep -q 'mips.*e
 cflags-$(CONFIG_CPU_LITTLE_ENDIAN)     += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EL $(undef-all) $(predef-le))
 
 cflags-$(CONFIG_CPU_HAS_SMARTMIPS)     += $(call cc-option,-msmartmips)
+cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,-mmicromips -mno-jals)
 
 cflags-$(CONFIG_SB1XXX_CORELIS)        += $(call cc-option,-mno-sched-prolog) \
                                   -fno-omit-frame-pointer
index c8862bdc2ff281eeb270e615c95a685edbcddcde..7032ac7ecd1bb2f8083b848ca5ca01572dc4cd44 100644 (file)
@@ -31,7 +31,6 @@ config MIPS_DB1000
        select ALCHEMY_GPIOINT_AU1000
        select DMA_NONCOHERENT
        select HW_HAS_PCI
-       select MIPS_DISABLE_OBSOLETE_IDE
        select SYS_SUPPORTS_BIG_ENDIAN
        select SYS_SUPPORTS_LITTLE_ENDIAN
        select SYS_HAS_EARLY_PRINTK
@@ -41,7 +40,6 @@ config MIPS_DB1235
        select ARCH_REQUIRE_GPIOLIB
        select HW_HAS_PCI
        select DMA_COHERENT
-       select MIPS_DISABLE_OBSOLETE_IDE
        select SYS_SUPPORTS_LITTLE_ENDIAN
        select SYS_HAS_EARLY_PRINTK
 
@@ -57,7 +55,6 @@ config MIPS_GPR
        select ALCHEMY_GPIOINT_AU1000
        select HW_HAS_PCI
        select DMA_NONCOHERENT
-       select MIPS_DISABLE_OBSOLETE_IDE
        select SYS_SUPPORTS_LITTLE_ENDIAN
        select SYS_HAS_EARLY_PRINTK
 
index fa1bdd1aea15875989e187c6d14728e245058f75..b3afcdd8d77af9380c154227a17c6a5ee944d8d2 100644 (file)
@@ -5,32 +5,14 @@ platform-$(CONFIG_MIPS_ALCHEMY) += alchemy/common/
 
 
 #
-# AMD Alchemy Pb1100 eval board
-#
-platform-$(CONFIG_MIPS_PB1100) += alchemy/devboards/
-load-$(CONFIG_MIPS_PB1100)     += 0xffffffff80100000
-
-#
-# AMD Alchemy Pb1500 eval board
-#
-platform-$(CONFIG_MIPS_PB1500) += alchemy/devboards/
-load-$(CONFIG_MIPS_PB1500)     += 0xffffffff80100000
-
-#
-# AMD Alchemy Pb1550 eval board
-#
-platform-$(CONFIG_MIPS_PB1550) += alchemy/devboards/
-load-$(CONFIG_MIPS_PB1550)     += 0xffffffff80100000
-
-#
-# AMD Alchemy Db1000/Db1500/Db1100 eval boards
+# AMD Alchemy Db1000/Db1500/Pb1500/Db1100/Pb1100 eval boards
 #
 platform-$(CONFIG_MIPS_DB1000) += alchemy/devboards/
 cflags-$(CONFIG_MIPS_DB1000)   += -I$(srctree)/arch/mips/include/asm/mach-db1x00
 load-$(CONFIG_MIPS_DB1000)     += 0xffffffff80100000
 
 #
-# AMD Alchemy Db1200/Pb1200/Db1550/Db1300 eval boards
+# AMD Alchemy Db1200/Pb1200/Db1550/Pb1550/Db1300 eval boards
 #
 platform-$(CONFIG_MIPS_DB1235) += alchemy/devboards/
 cflags-$(CONFIG_MIPS_DB1235)   += -I$(srctree)/arch/mips/include/asm/mach-db1x00
index 28abfeef09d6ff21c74c3975721ed70a3fa45d7e..92dfa481205b066ddf4ca29a62506bcab139e568 100644 (file)
@@ -30,7 +30,6 @@
 #include <asm/sections.h>
 
 #include <asm/mach-ar7/ar7.h>
-#include <asm/mips-boards/prom.h>
 
 static int __init memsize(void)
 {
index d5b3c9057018d8ccdbd2de891d8498de3b1ca76d..a0233a2c198812980405dc05e117de444413552e 100644 (file)
@@ -51,20 +51,6 @@ static void ath79_halt(void)
                cpu_wait();
 }
 
-static void __init ath79_detect_mem_size(void)
-{
-       unsigned long size;
-
-       for (size = ATH79_MEM_SIZE_MIN; size < ATH79_MEM_SIZE_MAX;
-            size <<= 1) {
-               if (!memcmp(ath79_detect_mem_size,
-                           ath79_detect_mem_size + size, 1024))
-                       break;
-       }
-
-       add_memory_region(0, size, BOOT_MEM_RAM);
-}
-
 static void __init ath79_detect_sys_type(void)
 {
        char *chip = "????";
@@ -212,7 +198,7 @@ void __init plat_mem_setup(void)
                                         AR71XX_DDR_CTRL_SIZE);
 
        ath79_detect_sys_type();
-       ath79_detect_mem_size();
+       detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX);
        ath79_clocks_init();
 
        _machine_restart = ath79_restart;
index d03e8799d1cf8c09455c60e0029f5371cf67de7c..5639662fd5031a005534181569e4ef628b6a5640 100644 (file)
@@ -25,6 +25,10 @@ config BCM63XX_CPU_6358
        bool "support 6358 CPU"
        select HW_HAS_PCI
 
+config BCM63XX_CPU_6362
+       bool "support 6362 CPU"
+       select HW_HAS_PCI
+
 config BCM63XX_CPU_6368
        bool "support 6368 CPU"
        select HW_HAS_PCI
index 9aa7d44898ed11cff9c70112e0e142923e27f2f4..a9505c4867e8dd1054646643abdf1640172b0dce 100644 (file)
@@ -726,11 +726,11 @@ void __init board_prom_init(void)
        u32 val;
 
        /* read base address of boot chip select (0)
-        * 6328 does not have MPI but boots from a fixed address
+        * 6328/6362 do not have MPI but boot from a fixed address
         */
-       if (BCMCPU_IS_6328())
+       if (BCMCPU_IS_6328() || BCMCPU_IS_6362()) {
                val = 0x18000000;
-       else {
+       } else {
                val = bcm_mpi_readl(MPI_CSBASE_REG(0));
                val &= MPI_CSBASE_BASE_MASK;
        }
index b9e948d594300281cc4199c3a89e5756630e111d..c726a97fc7987d5cb87dfd04868ce3bf12e66cec 100644 (file)
 #include <bcm63xx_io.h>
 #include <bcm63xx_regs.h>
 #include <bcm63xx_reset.h>
-#include <bcm63xx_clk.h>
+
+struct clk {
+       void            (*set)(struct clk *, int);
+       unsigned int    rate;
+       unsigned int    usage;
+       int             id;
+};
 
 static DEFINE_MUTEX(clocks_mutex);
 
@@ -119,11 +125,18 @@ static struct clk clk_ephy = {
  */
 static void enetsw_set(struct clk *clk, int enable)
 {
-       if (!BCMCPU_IS_6368())
+       if (BCMCPU_IS_6328())
+               bcm_hwclock_set(CKCTL_6328_ROBOSW_EN, enable);
+       else if (BCMCPU_IS_6362())
+               bcm_hwclock_set(CKCTL_6362_ROBOSW_EN, enable);
+       else if (BCMCPU_IS_6368())
+               bcm_hwclock_set(CKCTL_6368_ROBOSW_EN |
+                               CKCTL_6368_SWPKT_USB_EN |
+                               CKCTL_6368_SWPKT_SAR_EN,
+                               enable);
+       else
                return;
-       bcm_hwclock_set(CKCTL_6368_ROBOSW_EN |
-                       CKCTL_6368_SWPKT_USB_EN |
-                       CKCTL_6368_SWPKT_SAR_EN, enable);
+
        if (enable) {
                /* reset switch core after clock change */
                bcm63xx_core_set_reset(BCM63XX_RESET_ENETSW, 1);
@@ -160,6 +173,8 @@ static void usbh_set(struct clk *clk, int enable)
                bcm_hwclock_set(CKCTL_6328_USBH_EN, enable);
        else if (BCMCPU_IS_6348())
                bcm_hwclock_set(CKCTL_6348_USBH_EN, enable);
+       else if (BCMCPU_IS_6362())
+               bcm_hwclock_set(CKCTL_6362_USBH_EN, enable);
        else if (BCMCPU_IS_6368())
                bcm_hwclock_set(CKCTL_6368_USBH_EN, enable);
 }
@@ -175,6 +190,8 @@ static void usbd_set(struct clk *clk, int enable)
 {
        if (BCMCPU_IS_6328())
                bcm_hwclock_set(CKCTL_6328_USBD_EN, enable);
+       else if (BCMCPU_IS_6362())
+               bcm_hwclock_set(CKCTL_6362_USBD_EN, enable);
        else if (BCMCPU_IS_6368())
                bcm_hwclock_set(CKCTL_6368_USBD_EN, enable);
 }
@@ -196,6 +213,8 @@ static void spi_set(struct clk *clk, int enable)
                mask = CKCTL_6348_SPI_EN;
        else if (BCMCPU_IS_6358())
                mask = CKCTL_6358_SPI_EN;
+       else if (BCMCPU_IS_6362())
+               mask = CKCTL_6362_SPI_EN;
        else
                /* BCMCPU_IS_6368 */
                mask = CKCTL_6368_SPI_EN;
@@ -236,7 +255,10 @@ static struct clk clk_xtm = {
  */
 static void ipsec_set(struct clk *clk, int enable)
 {
-       bcm_hwclock_set(CKCTL_6368_IPSEC_EN, enable);
+       if (BCMCPU_IS_6362())
+               bcm_hwclock_set(CKCTL_6362_IPSEC_EN, enable);
+       else if (BCMCPU_IS_6368())
+               bcm_hwclock_set(CKCTL_6368_IPSEC_EN, enable);
 }
 
 static struct clk clk_ipsec = {
@@ -249,7 +271,10 @@ static struct clk clk_ipsec = {
 
 static void pcie_set(struct clk *clk, int enable)
 {
-       bcm_hwclock_set(CKCTL_6328_PCIE_EN, enable);
+       if (BCMCPU_IS_6328())
+               bcm_hwclock_set(CKCTL_6328_PCIE_EN, enable);
+       else if (BCMCPU_IS_6362())
+               bcm_hwclock_set(CKCTL_6362_PCIE_EN, enable);
 }
 
 static struct clk clk_pcie = {
@@ -315,9 +340,9 @@ struct clk *clk_get(struct device *dev, const char *id)
                return &clk_periph;
        if (BCMCPU_IS_6358() && !strcmp(id, "pcm"))
                return &clk_pcm;
-       if (BCMCPU_IS_6368() && !strcmp(id, "ipsec"))
+       if ((BCMCPU_IS_6362() || BCMCPU_IS_6368()) && !strcmp(id, "ipsec"))
                return &clk_ipsec;
-       if (BCMCPU_IS_6328() && !strcmp(id, "pcie"))
+       if ((BCMCPU_IS_6328() || BCMCPU_IS_6362()) && !strcmp(id, "pcie"))
                return &clk_pcie;
        return ERR_PTR(-ENOENT);
 }
index a7afb289b15aa03f93d6665ad55969ba97cfe6a5..79fe32df5e96c99da07d763e0990e07ee0321260 100644 (file)
@@ -25,7 +25,7 @@ const int *bcm63xx_irqs;
 EXPORT_SYMBOL(bcm63xx_irqs);
 
 static u16 bcm63xx_cpu_id;
-static u16 bcm63xx_cpu_rev;
+static u8 bcm63xx_cpu_rev;
 static unsigned int bcm63xx_cpu_freq;
 static unsigned int bcm63xx_memory_size;
 
@@ -71,6 +71,15 @@ static const int bcm6358_irqs[] = {
 
 };
 
+static const unsigned long bcm6362_regs_base[] = {
+       __GEN_CPU_REGS_TABLE(6362)
+};
+
+static const int bcm6362_irqs[] = {
+       __GEN_CPU_IRQ_TABLE(6362)
+
+};
+
 static const unsigned long bcm6368_regs_base[] = {
        __GEN_CPU_REGS_TABLE(6368)
 };
@@ -87,7 +96,7 @@ u16 __bcm63xx_get_cpu_id(void)
 
 EXPORT_SYMBOL(__bcm63xx_get_cpu_id);
 
-u16 bcm63xx_get_cpu_rev(void)
+u8 bcm63xx_get_cpu_rev(void)
 {
        return bcm63xx_cpu_rev;
 }
@@ -169,6 +178,42 @@ static unsigned int detect_cpu_clock(void)
                return (16 * 1000000 * n1 * n2) / m1;
        }
 
+       case BCM6362_CPU_ID:
+       {
+               unsigned int tmp, mips_pll_fcvo;
+
+               tmp = bcm_misc_readl(MISC_STRAPBUS_6362_REG);
+               mips_pll_fcvo = (tmp & STRAPBUS_6362_FCVO_MASK)
+                               >> STRAPBUS_6362_FCVO_SHIFT;
+               switch (mips_pll_fcvo) {
+               case 0x03:
+               case 0x0b:
+               case 0x13:
+               case 0x1b:
+                       return 240000000;
+               case 0x04:
+               case 0x0c:
+               case 0x14:
+               case 0x1c:
+                       return 160000000;
+               case 0x05:
+               case 0x0e:
+               case 0x16:
+               case 0x1e:
+               case 0x1f:
+                       return 400000000;
+               case 0x06:
+                       return 440000000;
+               case 0x07:
+               case 0x17:
+                       return 384000000;
+               case 0x15:
+               case 0x1d:
+                       return 200000000;
+               default:
+                       return 320000000;
+               }
+       }
        case BCM6368_CPU_ID:
        {
                unsigned int tmp, p1, p2, ndiv, m1;
@@ -205,7 +250,7 @@ static unsigned int detect_memory_size(void)
        unsigned int cols = 0, rows = 0, is_32bits = 0, banks = 0;
        u32 val;
 
-       if (BCMCPU_IS_6328())
+       if (BCMCPU_IS_6328() || BCMCPU_IS_6362())
                return bcm_ddr_readl(DDR_CSEND_REG) << 24;
 
        if (BCMCPU_IS_6345()) {
@@ -240,53 +285,27 @@ static unsigned int detect_memory_size(void)
 
 void __init bcm63xx_cpu_init(void)
 {
-       unsigned int tmp, expected_cpu_id;
+       unsigned int tmp;
        struct cpuinfo_mips *c = &current_cpu_data;
        unsigned int cpu = smp_processor_id();
+       u32 chipid_reg;
 
        /* soc registers location depends on cpu type */
-       expected_cpu_id = 0;
+       chipid_reg = 0;
 
        switch (c->cputype) {
        case CPU_BMIPS3300:
-               if ((read_c0_prid() & 0xff00) == PRID_IMP_BMIPS3300_ALT) {
-                       expected_cpu_id = BCM6348_CPU_ID;
-                       bcm63xx_regs_base = bcm6348_regs_base;
-                       bcm63xx_irqs = bcm6348_irqs;
-               } else {
+               if ((read_c0_prid() & 0xff00) != PRID_IMP_BMIPS3300_ALT)
                        __cpu_name[cpu] = "Broadcom BCM6338";
-                       expected_cpu_id = BCM6338_CPU_ID;
-                       bcm63xx_regs_base = bcm6338_regs_base;
-                       bcm63xx_irqs = bcm6338_irqs;
-               }
-               break;
+               /* fall-through */
        case CPU_BMIPS32:
-               expected_cpu_id = BCM6345_CPU_ID;
-               bcm63xx_regs_base = bcm6345_regs_base;
-               bcm63xx_irqs = bcm6345_irqs;
+               chipid_reg = BCM_6345_PERF_BASE;
                break;
        case CPU_BMIPS4350:
-               if ((read_c0_prid() & 0xf0) == 0x10) {
-                       expected_cpu_id = BCM6358_CPU_ID;
-                       bcm63xx_regs_base = bcm6358_regs_base;
-                       bcm63xx_irqs = bcm6358_irqs;
-               } else {
-                       /* all newer chips have the same chip id location */
-                       u16 chip_id = bcm_readw(BCM_6368_PERF_BASE);
-
-                       switch (chip_id) {
-                       case BCM6328_CPU_ID:
-                               expected_cpu_id = BCM6328_CPU_ID;
-                               bcm63xx_regs_base = bcm6328_regs_base;
-                               bcm63xx_irqs = bcm6328_irqs;
-                               break;
-                       case BCM6368_CPU_ID:
-                               expected_cpu_id = BCM6368_CPU_ID;
-                               bcm63xx_regs_base = bcm6368_regs_base;
-                               bcm63xx_irqs = bcm6368_irqs;
-                               break;
-                       }
-               }
+               if ((read_c0_prid() & 0xf0) == 0x10)
+                       chipid_reg = BCM_6345_PERF_BASE;
+               else
+                       chipid_reg = BCM_6368_PERF_BASE;
                break;
        }
 
@@ -294,20 +313,47 @@ void __init bcm63xx_cpu_init(void)
         * really early to panic, but delaying panic would not help since we
         * will never get any working console
         */
-       if (!expected_cpu_id)
+       if (!chipid_reg)
                panic("unsupported Broadcom CPU");
 
-       /*
-        * bcm63xx_regs_base is set, we can access soc registers
-        */
-
-       /* double check CPU type */
-       tmp = bcm_perf_readl(PERF_REV_REG);
+       /* read out CPU type */
+       tmp = bcm_readl(chipid_reg);
        bcm63xx_cpu_id = (tmp & REV_CHIPID_MASK) >> REV_CHIPID_SHIFT;
        bcm63xx_cpu_rev = (tmp & REV_REVID_MASK) >> REV_REVID_SHIFT;
 
-       if (bcm63xx_cpu_id != expected_cpu_id)
-               panic("bcm63xx CPU id mismatch");
+       switch (bcm63xx_cpu_id) {
+       case BCM6328_CPU_ID:
+               bcm63xx_regs_base = bcm6328_regs_base;
+               bcm63xx_irqs = bcm6328_irqs;
+               break;
+       case BCM6338_CPU_ID:
+               bcm63xx_regs_base = bcm6338_regs_base;
+               bcm63xx_irqs = bcm6338_irqs;
+               break;
+       case BCM6345_CPU_ID:
+               bcm63xx_regs_base = bcm6345_regs_base;
+               bcm63xx_irqs = bcm6345_irqs;
+               break;
+       case BCM6348_CPU_ID:
+               bcm63xx_regs_base = bcm6348_regs_base;
+               bcm63xx_irqs = bcm6348_irqs;
+               break;
+       case BCM6358_CPU_ID:
+               bcm63xx_regs_base = bcm6358_regs_base;
+               bcm63xx_irqs = bcm6358_irqs;
+               break;
+       case BCM6362_CPU_ID:
+               bcm63xx_regs_base = bcm6362_regs_base;
+               bcm63xx_irqs = bcm6362_irqs;
+               break;
+       case BCM6368_CPU_ID:
+               bcm63xx_regs_base = bcm6368_regs_base;
+               bcm63xx_irqs = bcm6368_irqs;
+               break;
+       default:
+               panic("unsupported broadcom CPU %x", bcm63xx_cpu_id);
+               break;
+       }
 
        bcm63xx_cpu_freq = detect_cpu_clock();
        bcm63xx_memory_size = detect_memory_size();
index 58371c7deac20e0e3c53695ee75e99ec5ce91e85..588d1ec622e404fd2fbbe0a5d9a6868477b2c41b 100644 (file)
@@ -77,6 +77,12 @@ static int __init bcm63xx_detect_flash_type(void)
                        return BCM63XX_FLASH_TYPE_PARALLEL;
                else
                        return BCM63XX_FLASH_TYPE_SERIAL;
+       case BCM6362_CPU_ID:
+               val = bcm_misc_readl(MISC_STRAPBUS_6362_REG);
+               if (val & STRAPBUS_6362_BOOT_SEL_SERIAL)
+                       return BCM63XX_FLASH_TYPE_SERIAL;
+               else
+                       return BCM63XX_FLASH_TYPE_NAND;
        case BCM6368_CPU_ID:
                val = bcm_gpio_readl(GPIO_STRAPBUS_REG);
                switch (val & STRAPBUS_6368_BOOT_SEL_MASK) {
index e97fd60e92ef289e215908b7327889435f3f702f..3065bb61820d5befea57f31580c91c15afcc666b 100644 (file)
 /*
  * register offsets
  */
-static const unsigned long bcm6338_regs_spi[] = {
-       __GEN_SPI_REGS_TABLE(6338)
-};
-
 static const unsigned long bcm6348_regs_spi[] = {
        __GEN_SPI_REGS_TABLE(6348)
 };
@@ -34,23 +30,15 @@ static const unsigned long bcm6358_regs_spi[] = {
        __GEN_SPI_REGS_TABLE(6358)
 };
 
-static const unsigned long bcm6368_regs_spi[] = {
-       __GEN_SPI_REGS_TABLE(6368)
-};
-
 const unsigned long *bcm63xx_regs_spi;
 EXPORT_SYMBOL(bcm63xx_regs_spi);
 
 static __init void bcm63xx_spi_regs_init(void)
 {
-       if (BCMCPU_IS_6338())
-               bcm63xx_regs_spi = bcm6338_regs_spi;
-       if (BCMCPU_IS_6348())
+       if (BCMCPU_IS_6338() || BCMCPU_IS_6348())
                bcm63xx_regs_spi = bcm6348_regs_spi;
-       if (BCMCPU_IS_6358())
+       if (BCMCPU_IS_6358() || BCMCPU_IS_6362() || BCMCPU_IS_6368())
                bcm63xx_regs_spi = bcm6358_regs_spi;
-       if (BCMCPU_IS_6368())
-               bcm63xx_regs_spi = bcm6368_regs_spi;
 }
 #else
 static __init void bcm63xx_spi_regs_init(void) { }
@@ -93,13 +81,13 @@ int __init bcm63xx_spi_register(void)
        spi_resources[1].start = bcm63xx_get_irq_number(IRQ_SPI);
 
        if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) {
-               spi_resources[0].end += BCM_6338_RSET_SPI_SIZE - 1;
-               spi_pdata.fifo_size = SPI_6338_MSG_DATA_SIZE;
-               spi_pdata.msg_type_shift = SPI_6338_MSG_TYPE_SHIFT;
-               spi_pdata.msg_ctl_width = SPI_6338_MSG_CTL_WIDTH;
+               spi_resources[0].end += BCM_6348_RSET_SPI_SIZE - 1;
+               spi_pdata.fifo_size = SPI_6348_MSG_DATA_SIZE;
+               spi_pdata.msg_type_shift = SPI_6348_MSG_TYPE_SHIFT;
+               spi_pdata.msg_ctl_width = SPI_6348_MSG_CTL_WIDTH;
        }
 
-       if (BCMCPU_IS_6358() || BCMCPU_IS_6368()) {
+       if (BCMCPU_IS_6358() || BCMCPU_IS_6362() || BCMCPU_IS_6368()) {
                spi_resources[0].end += BCM_6358_RSET_SPI_SIZE - 1;
                spi_pdata.fifo_size = SPI_6358_MSG_DATA_SIZE;
                spi_pdata.msg_type_shift = SPI_6358_MSG_TYPE_SHIFT;
index da24c2bd9b7cde3cfbfa10caaa6f1ae70f6a95a5..c0ab3887f42ef8b355eed9971f04188be0b966d8 100644 (file)
@@ -82,6 +82,17 @@ static void __internal_irq_unmask_64(unsigned int irq) __maybe_unused;
 #define ext_irq_cfg_reg1       PERF_EXTIRQ_CFG_REG_6358
 #define ext_irq_cfg_reg2       0
 #endif
+#ifdef CONFIG_BCM63XX_CPU_6362
+#define irq_stat_reg           PERF_IRQSTAT_6362_REG
+#define irq_mask_reg           PERF_IRQMASK_6362_REG
+#define irq_bits               64
+#define is_ext_irq_cascaded    1
+#define ext_irq_start          (BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE)
+#define ext_irq_end            (BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE)
+#define ext_irq_count          4
+#define ext_irq_cfg_reg1       PERF_EXTIRQ_CFG_REG_6362
+#define ext_irq_cfg_reg2       0
+#endif
 #ifdef CONFIG_BCM63XX_CPU_6368
 #define irq_stat_reg           PERF_IRQSTAT_6368_REG
 #define irq_mask_reg           PERF_IRQMASK_6368_REG
@@ -170,6 +181,16 @@ static void bcm63xx_init_irq(void)
                ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE;
                ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358;
                break;
+       case BCM6362_CPU_ID:
+               irq_stat_addr += PERF_IRQSTAT_6362_REG;
+               irq_mask_addr += PERF_IRQMASK_6362_REG;
+               irq_bits = 64;
+               ext_irq_count = 4;
+               is_ext_irq_cascaded = 1;
+               ext_irq_start = BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE;
+               ext_irq_end = BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE;
+               ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6362;
+               break;
        case BCM6368_CPU_ID:
                irq_stat_addr += PERF_IRQSTAT_6368_REG;
                irq_mask_addr += PERF_IRQMASK_6368_REG;
@@ -458,6 +479,7 @@ static int bcm63xx_external_irq_set_type(struct irq_data *d,
        case BCM6338_CPU_ID:
        case BCM6345_CPU_ID:
        case BCM6358_CPU_ID:
+       case BCM6362_CPU_ID:
        case BCM6368_CPU_ID:
                if (levelsense)
                        reg |= EXTIRQ_CFG_LEVELSENSE(irq);
index 10eaff4580710d4eae4879691502c55ebef3f1d6..fd698087fbfd9367b777e1391aaabb53789b2936 100644 (file)
@@ -36,6 +36,8 @@ void __init prom_init(void)
                mask = CKCTL_6348_ALL_SAFE_EN;
        else if (BCMCPU_IS_6358())
                mask = CKCTL_6358_ALL_SAFE_EN;
+       else if (BCMCPU_IS_6362())
+               mask = CKCTL_6362_ALL_SAFE_EN;
        else if (BCMCPU_IS_6368())
                mask = CKCTL_6368_ALL_SAFE_EN;
        else
index 68a31bb90cbf6de79acda74449b0bbfa749a9f21..317931c6cf58f9305b001e25d735fcf189f18dff 100644 (file)
 #define BCM6358_RESET_PCIE     0
 #define BCM6358_RESET_PCIE_EXT 0
 
+#define BCM6362_RESET_SPI      SOFTRESET_6362_SPI_MASK
+#define BCM6362_RESET_ENET     0
+#define BCM6362_RESET_USBH     SOFTRESET_6362_USBH_MASK
+#define BCM6362_RESET_USBD     SOFTRESET_6362_USBS_MASK
+#define BCM6362_RESET_DSL      0
+#define BCM6362_RESET_SAR      SOFTRESET_6362_SAR_MASK
+#define BCM6362_RESET_EPHY     SOFTRESET_6362_EPHY_MASK
+#define BCM6362_RESET_ENETSW   SOFTRESET_6362_ENETSW_MASK
+#define BCM6362_RESET_PCM      SOFTRESET_6362_PCM_MASK
+#define BCM6362_RESET_MPI      0
+#define BCM6362_RESET_PCIE      (SOFTRESET_6362_PCIE_MASK | \
+                                SOFTRESET_6362_PCIE_CORE_MASK)
+#define BCM6362_RESET_PCIE_EXT SOFTRESET_6362_PCIE_EXT_MASK
+
 #define BCM6368_RESET_SPI      SOFTRESET_6368_SPI_MASK
 #define BCM6368_RESET_ENET     0
 #define BCM6368_RESET_USBH     SOFTRESET_6368_USBH_MASK
@@ -119,6 +133,10 @@ static const u32 bcm6358_reset_bits[] = {
        __GEN_RESET_BITS_TABLE(6358)
 };
 
+static const u32 bcm6362_reset_bits[] = {
+       __GEN_RESET_BITS_TABLE(6362)
+};
+
 static const u32 bcm6368_reset_bits[] = {
        __GEN_RESET_BITS_TABLE(6368)
 };
@@ -140,6 +158,9 @@ static int __init bcm63xx_reset_bits_init(void)
        } else if (BCMCPU_IS_6358()) {
                reset_reg = PERF_SOFTRESET_6358_REG;
                bcm63xx_reset_bits = bcm6358_reset_bits;
+       } else if (BCMCPU_IS_6362()) {
+               reset_reg = PERF_SOFTRESET_6362_REG;
+               bcm63xx_reset_bits = bcm6362_reset_bits;
        } else if (BCMCPU_IS_6368()) {
                reset_reg = PERF_SOFTRESET_6368_REG;
                bcm63xx_reset_bits = bcm6368_reset_bits;
@@ -182,6 +203,13 @@ static const u32 bcm63xx_reset_bits[] = {
 #define reset_reg PERF_SOFTRESET_6358_REG
 #endif
 
+#ifdef CONFIG_BCM63XX_CPU_6362
+static const u32 bcm63xx_reset_bits[] = {
+       __GEN_RESET_BITS_TABLE(6362)
+};
+#define reset_reg PERF_SOFTRESET_6362_REG
+#endif
+
 #ifdef CONFIG_BCM63XX_CPU_6368
 static const u32 bcm63xx_reset_bits[] = {
        __GEN_RESET_BITS_TABLE(6368)
index 35e18e98beb96b04151999aac8bf037129efc324..24a24445db64edd7d1388c35f3bd471e049489a9 100644 (file)
@@ -83,6 +83,9 @@ void bcm63xx_machine_reboot(void)
        case BCM6358_CPU_ID:
                perf_regs[0] = PERF_EXTIRQ_CFG_REG_6358;
                break;
+       case BCM6362_CPU_ID:
+               perf_regs[0] = PERF_EXTIRQ_CFG_REG_6362;
+               break;
        }
 
        for (i = 0; i < 2; i++) {
@@ -126,7 +129,7 @@ static void __bcm63xx_machine_reboot(char *p)
 const char *get_system_type(void)
 {
        static char buf[128];
-       snprintf(buf, sizeof(buf), "bcm63xx/%s (0x%04x/0x%04X)",
+       snprintf(buf, sizeof(buf), "bcm63xx/%s (0x%04x/0x%02X)",
                 board_get_name(),
                 bcm63xx_get_cpu_id(), bcm63xx_get_cpu_rev());
        return buf;
index 156aa6143e1117646ab7b81ccf57ca623a258513..a22f06a6f7cac31e3506619371615b8f4e1c8d4b 100644 (file)
@@ -1032,9 +1032,8 @@ static int octeon_irq_gpio_map_common(struct irq_domain *d,
        if (!octeon_irq_virq_in_range(virq))
                return -EINVAL;
 
-       hw += gpiod->base_hwirq;
-       line = hw >> 6;
-       bit = hw & 63;
+       line = (hw + gpiod->base_hwirq) >> 6;
+       bit = (hw + gpiod->base_hwirq) & 63;
        if (line > line_limit || octeon_irq_ciu_to_irq[line][bit] != 0)
                return -EINVAL;
 
index cd732e5b4fd5f856df3f46a0a2ee83d0d7491f66..ce1d3eeeb7373373fa0f07056ff3ca39e0f0c496 100644 (file)
@@ -2,30 +2,21 @@ CONFIG_MIPS_MALTA=y
 CONFIG_CPU_LITTLE_ENDIAN=y
 CONFIG_CPU_MIPS32_R2=y
 CONFIG_MIPS_MT_SMP=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
 CONFIG_HZ_100=y
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
 CONFIG_LOG_BUF_SHIFT=15
-CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_RELAY=y
 CONFIG_NAMESPACES=y
-CONFIG_UTS_NS=y
-CONFIG_IPC_NS=y
-CONFIG_PID_NS=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_RELAY=y
 CONFIG_EXPERT=y
-# CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
-# CONFIG_BLK_DEV_BSG is not set
 CONFIG_PCI=y
-CONFIG_PM=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_XFRM_USER=m
@@ -41,8 +32,6 @@ CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
 CONFIG_NET_IPIP=m
-CONFIG_NET_IPGRE=m
-CONFIG_NET_IPGRE_BROADCAST=y
 CONFIG_IP_MROUTE=y
 CONFIG_IP_PIMSM_V1=y
 CONFIG_IP_PIMSM_V2=y
@@ -65,7 +54,6 @@ CONFIG_IPV6_MROUTE=y
 CONFIG_IPV6_PIMSM_V2=y
 CONFIG_NETWORK_SECMARK=y
 CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
@@ -136,23 +124,15 @@ CONFIG_IP_VS_DH=m
 CONFIG_IP_VS_SH=m
 CONFIG_IP_VS_SED=m
 CONFIG_IP_VS_NQ=m
-CONFIG_IP_VS_FTP=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
 CONFIG_IP_NF_TARGET_ULOG=m
-CONFIG_NF_NAT=m
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
@@ -162,8 +142,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_QUEUE=m
-CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
 CONFIG_IP6_NF_MATCH_FRAG=m
@@ -173,7 +151,6 @@ CONFIG_IP6_NF_MATCH_IPV6HEADER=m
 CONFIG_IP6_NF_MATCH_MH=m
 CONFIG_IP6_NF_MATCH_RT=m
 CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_TARGET_LOG=m
 CONFIG_IP6_NF_FILTER=m
 CONFIG_IP6_NF_TARGET_REJECT=m
 CONFIG_IP6_NF_MANGLE=m
@@ -247,12 +224,10 @@ CONFIG_MAC80211=m
 CONFIG_MAC80211_RC_PID=y
 CONFIG_MAC80211_RC_DEFAULT_PID=y
 CONFIG_MAC80211_MESH=y
-CONFIG_MAC80211_LEDS=y
 CONFIG_RFKILL=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_CONNECTOR=m
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_OOPS=m
@@ -271,7 +246,6 @@ CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
-# CONFIG_MISC_DEVICES is not set
 CONFIG_IDE=y
 CONFIG_BLK_DEV_IDECD=y
 CONFIG_IDE_GENERIC=y
@@ -317,13 +291,19 @@ CONFIG_DM_MIRROR=m
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_NETDEVICES=y
-CONFIG_IFB=m
-CONFIG_DUMMY=m
 CONFIG_BONDING=m
-CONFIG_MACVLAN=m
+CONFIG_DUMMY=m
 CONFIG_EQUALIZER=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
 CONFIG_TUN=m
 CONFIG_VETH=m
+# CONFIG_NET_VENDOR_3COM is not set
+CONFIG_PCNET32=y
+CONFIG_CHELSIO_T3=m
+CONFIG_AX88796=m
+CONFIG_NETXEN_NIC=m
+CONFIG_TC35815=m
 CONFIG_MARVELL_PHY=m
 CONFIG_DAVICOM_PHY=m
 CONFIG_QSEMI_PHY=m
@@ -334,14 +314,6 @@ CONFIG_SMSC_PHY=m
 CONFIG_BROADCOM_PHY=m
 CONFIG_ICPLUS_PHY=m
 CONFIG_REALTEK_PHY=m
-CONFIG_MDIO_BITBANG=m
-CONFIG_NET_ETHERNET=y
-CONFIG_AX88796=m
-CONFIG_NET_PCI=y
-CONFIG_PCNET32=y
-CONFIG_TC35815=m
-CONFIG_CHELSIO_T3=m
-CONFIG_NETXEN_NIC=m
 CONFIG_ATMEL=m
 CONFIG_PCI_ATMEL=m
 CONFIG_PRISM54=m
@@ -352,15 +324,7 @@ CONFIG_HOSTAP_PLX=m
 CONFIG_HOSTAP_PCI=m
 CONFIG_IPW2100=m
 CONFIG_IPW2100_MONITOR=y
-CONFIG_IPW2200=m
-CONFIG_IPW2200_MONITOR=y
-CONFIG_IPW2200_PROMISCUOUS=y
-CONFIG_IPW2200_QOS=y
 CONFIG_LIBERTAS=m
-CONFIG_HERMES=m
-CONFIG_PLX_HERMES=m
-CONFIG_TMD_HERMES=m
-CONFIG_NORTEL_HERMES=m
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO_I8042 is not set
@@ -373,12 +337,6 @@ CONFIG_FB_CIRRUS=y
 # CONFIG_VGA_CONSOLE is not set
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_HID=m
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_TRIGGER_TIMER=m
-CONFIG_LEDS_TRIGGER_IDE_DISK=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=m
-CONFIG_LEDS_TRIGGER_BACKLIGHT=m
-CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_CMOS=y
 CONFIG_UIO=m
@@ -398,7 +356,6 @@ CONFIG_XFS_QUOTA=y
 CONFIG_XFS_POSIX_ACL=y
 CONFIG_QUOTA=y
 CONFIG_QFMT_V2=y
-CONFIG_AUTOFS_FS=y
 CONFIG_FUSE_FS=m
 CONFIG_ISO9660_FS=m
 CONFIG_JOLIET=y
@@ -425,7 +382,6 @@ CONFIG_ROMFS_FS=m
 CONFIG_SYSV_FS=m
 CONFIG_UFS_FS=m
 CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
 CONFIG_ROOT_NFS=y
 CONFIG_NFSD=y
 CONFIG_NFSD_V3=y
@@ -466,7 +422,6 @@ CONFIG_NLS_ISO8859_14=m
 CONFIG_NLS_ISO8859_15=m
 CONFIG_NLS_KOI8_R=m
 CONFIG_NLS_KOI8_U=m
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
 CONFIG_CRYPTO_NULL=m
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_LRW=m
diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig
new file mode 100644
index 0000000..341bb47
--- /dev/null
@@ -0,0 +1,456 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_MIPS_MT_SMP=y
+CONFIG_HZ_100=y
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_NAMESPACES=y
+CONFIG_RELAY=y
+CONFIG_EXPERT=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_PCI=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=m
+CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_IPV6=y
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_ULOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_SCTP=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+CONFIG_PHONET=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_CLS_IND=y
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+CONFIG_MAC80211_RC_PID=y
+CONFIG_MAC80211_RC_DEFAULT_PID=y
+CONFIG_MAC80211_MESH=y
+CONFIG_RFKILL=m
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_CONNECTOR=m
+CONFIG_MTD=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_OOPS=m
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_UBI=m
+CONFIG_MTD_UBI_GLUEBI=m
+CONFIG_BLK_DEV_FD=m
+CONFIG_BLK_DEV_UMEM=m
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_IDE_GENERIC=y
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_BLK_DEV_IT8213=m
+CONFIG_BLK_DEV_TC86C001=m
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=m
+CONFIG_SCSI_TGT=m
+CONFIG_BLK_DEV_SD=m
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=m
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_ISCSI_TCP=m
+CONFIG_BLK_DEV_3W_XXXX_RAID=m
+CONFIG_SCSI_3W_9XXX=m
+CONFIG_SCSI_ACARD=m
+CONFIG_SCSI_AACRAID=m
+CONFIG_SCSI_AIC7XXX=m
+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
+# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+CONFIG_PCNET32=y
+CONFIG_CHELSIO_T3=m
+CONFIG_AX88796=m
+CONFIG_NETXEN_NIC=m
+CONFIG_TC35815=m
+CONFIG_MARVELL_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_VITESSE_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_BROADCOM_PHY=m
+CONFIG_ICPLUS_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_ATMEL=m
+CONFIG_PCI_ATMEL=m
+CONFIG_PRISM54=m
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+CONFIG_HOSTAP_PLX=m
+CONFIG_HOSTAP_PCI=m
+CONFIG_IPW2100=m
+CONFIG_IPW2100_MONITOR=y
+CONFIG_LIBERTAS=m
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO_I8042 is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_CIRRUS=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_HID=m
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_UIO=m
+CONFIG_UIO_CIF=m
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_REISERFS_FS=m
+CONFIG_REISERFS_PROC_INFO=y
+CONFIG_REISERFS_FS_XATTR=y
+CONFIG_REISERFS_FS_POSIX_ACL=y
+CONFIG_REISERFS_FS_SECURITY=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_BEFS_FS=m
+CONFIG_BFS_FS=m
+CONFIG_EFS_FS=m
+CONFIG_JFFS2_FS=m
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_RUBIN=y
+CONFIG_CRAMFS=m
+CONFIG_VXFS_FS=m
+CONFIG_MINIX_FS=m
+CONFIG_ROMFS_FS=m
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_ENABLE_DEFAULT_TRACERS=y
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRC16=m
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=m
+CONFIG_KVM_MIPS_DYN_TRANS=y
+CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS=y
+CONFIG_VHOST_NET=m
diff --git a/arch/mips/configs/malta_kvm_guest_defconfig b/arch/mips/configs/malta_kvm_guest_defconfig
new file mode 100644
index 0000000..2b8558b
--- /dev/null
@@ -0,0 +1,453 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_KVM_GUEST=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_HZ_100=y
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_NAMESPACES=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_PCI=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=m
+CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_IPV6=y
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_ULOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_SCTP=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+CONFIG_PHONET=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_CLS_IND=y
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+CONFIG_MAC80211_RC_PID=y
+CONFIG_MAC80211_RC_DEFAULT_PID=y
+CONFIG_MAC80211_MESH=y
+CONFIG_RFKILL=m
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_CONNECTOR=m
+CONFIG_MTD=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_OOPS=m
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_UBI=m
+CONFIG_MTD_UBI_GLUEBI=m
+CONFIG_BLK_DEV_FD=m
+CONFIG_BLK_DEV_UMEM=m
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_VIRTIO_BLK=y
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_IDE_GENERIC=y
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_BLK_DEV_IT8213=m
+CONFIG_BLK_DEV_TC86C001=m
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=m
+CONFIG_SCSI_TGT=m
+CONFIG_BLK_DEV_SD=m
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=m
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_ISCSI_TCP=m
+CONFIG_BLK_DEV_3W_XXXX_RAID=m
+CONFIG_SCSI_3W_9XXX=m
+CONFIG_SCSI_ACARD=m
+CONFIG_SCSI_AACRAID=m
+CONFIG_SCSI_AIC7XXX=m
+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
+# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=y
+CONFIG_PCNET32=y
+CONFIG_CHELSIO_T3=m
+CONFIG_AX88796=m
+CONFIG_NETXEN_NIC=m
+CONFIG_TC35815=m
+CONFIG_MARVELL_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_VITESSE_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_BROADCOM_PHY=m
+CONFIG_ICPLUS_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_ATMEL=m
+CONFIG_PCI_ATMEL=m
+CONFIG_PRISM54=m
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+CONFIG_HOSTAP_PLX=m
+CONFIG_HOSTAP_PCI=m
+CONFIG_IPW2100=m
+CONFIG_IPW2100_MONITOR=y
+CONFIG_LIBERTAS=m
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO_I8042 is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_CIRRUS=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_HID=m
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_UIO=m
+CONFIG_UIO_CIF=m
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_REISERFS_FS=m
+CONFIG_REISERFS_PROC_INFO=y
+CONFIG_REISERFS_FS_XATTR=y
+CONFIG_REISERFS_FS_POSIX_ACL=y
+CONFIG_REISERFS_FS_SECURITY=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_BEFS_FS=m
+CONFIG_BFS_FS=m
+CONFIG_EFS_FS=m
+CONFIG_JFFS2_FS=m
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_RUBIN=y
+CONFIG_CRAMFS=m
+CONFIG_VXFS_FS=m
+CONFIG_MINIX_FS=m
+CONFIG_ROMFS_FS=m
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRC16=m
diff --git a/arch/mips/configs/maltaaprp_defconfig b/arch/mips/configs/maltaaprp_defconfig
new file mode 100644
index 0000000..93057a7
--- /dev/null
@@ -0,0 +1,195 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_MIPS_VPE_LOADER=y
+CONFIG_MIPS_VPE_APSP_API=y
+CONFIG_HZ_100=y
+CONFIG_LOCALVERSION="aprp"
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PCI=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+# CONFIG_INET_LRO is not set
+CONFIG_IPV6_PRIVACY=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_CLS_IND=y
+# CONFIG_WIRELESS is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_IDE=y
+# CONFIG_IDE_PROC_FS is not set
+# CONFIG_IDEPCI_PCIBUS_ORDER is not set
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+CONFIG_PCNET32=y
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_WLAN is not set
+# CONFIG_VT is not set
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MATROX=y
+CONFIG_FB_MATROX_G=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_FTRACE is not set
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/maltasmtc_defconfig b/arch/mips/configs/maltasmtc_defconfig
new file mode 100644
index 0000000..4e54b75
--- /dev/null
@@ -0,0 +1,196 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_MIPS_MT_SMTC=y
+# CONFIG_MIPS_MT_FPAFF is not set
+CONFIG_NR_CPUS=9
+CONFIG_HZ_48=y
+CONFIG_LOCALVERSION="smtc"
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PCI=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+# CONFIG_INET_LRO is not set
+CONFIG_IPV6_PRIVACY=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_CLS_IND=y
+# CONFIG_WIRELESS is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_IDE=y
+# CONFIG_IDE_PROC_FS is not set
+# CONFIG_IDEPCI_PCIBUS_ORDER is not set
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+CONFIG_PCNET32=y
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_WLAN is not set
+# CONFIG_VT is not set
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MATROX=y
+CONFIG_FB_MATROX_G=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_FTRACE is not set
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/maltasmvp_defconfig b/arch/mips/configs/maltasmvp_defconfig
new file mode 100644
index 0000000..8a66602
--- /dev/null
@@ -0,0 +1,199 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_MIPS_MT_SMP=y
+CONFIG_SCHED_SMT=y
+CONFIG_MIPS_CMP=y
+CONFIG_NR_CPUS=8
+CONFIG_HZ_100=y
+CONFIG_LOCALVERSION="cmp"
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PCI=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+# CONFIG_INET_LRO is not set
+CONFIG_IPV6_PRIVACY=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_CLS_IND=y
+# CONFIG_WIRELESS is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_IDE=y
+# CONFIG_IDE_PROC_FS is not set
+# CONFIG_IDEPCI_PCIBUS_ORDER is not set
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+CONFIG_PCNET32=y
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_WLAN is not set
+# CONFIG_VT is not set
+CONFIG_LEGACY_PTY_COUNT=4
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MATROX=y
+CONFIG_FB_MATROX_G=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_FTRACE is not set
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/maltaup_defconfig b/arch/mips/configs/maltaup_defconfig
new file mode 100644
index 0000000..9868fc9
--- /dev/null
@@ -0,0 +1,194 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_HZ_100=y
+CONFIG_LOCALVERSION="up"
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PCI=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+# CONFIG_INET_LRO is not set
+CONFIG_IPV6_PRIVACY=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_CLS_IND=y
+# CONFIG_WIRELESS is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_IDE=y
+# CONFIG_IDE_PROC_FS is not set
+# CONFIG_IDEPCI_PCIBUS_ORDER is not set
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+CONFIG_PCNET32=y
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_WLAN is not set
+# CONFIG_VT is not set
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MATROX=y
+CONFIG_FB_MATROX_G=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_FTRACE is not set
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
index e3eec68d91326fe9784304b32cb988b5f7e62059..0abe681c11a07abdf4945aa46b8beeeac310fa65 100644
@@ -2,7 +2,6 @@ CONFIG_MIPS_SEAD3=y
 CONFIG_CPU_LITTLE_ENDIAN=y
 CONFIG_CPU_MIPS32_R2=y
 CONFIG_HZ_100=y
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_NO_HZ=y
@@ -115,10 +114,8 @@ CONFIG_NLS_ISO8859_1=y
 CONFIG_NLS_ISO8859_15=y
 CONFIG_NLS_UTF8=y
 # CONFIG_FTRACE is not set
-CONFIG_CRYPTO=y
 CONFIG_CRYPTO_CBC=y
 CONFIG_CRYPTO_ECB=y
-CONFIG_CRYPTO_AES=y
 CONFIG_CRYPTO_ARC4=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
 # CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/sead3micro_defconfig b/arch/mips/configs/sead3micro_defconfig
new file mode 100644
index 0000000..2a0da5b
--- /dev/null
@@ -0,0 +1,122 @@
+CONFIG_MIPS_SEAD3=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_CPU_MICROMIPS=y
+CONFIG_HZ_100=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_MODULES=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_MTD=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_GLUEBI=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_SCSI=y
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+CONFIG_SMSC911X=y
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_MARVELL_PHY=y
+CONFIG_DAVICOM_PHY=y
+CONFIG_QSEMI_PHY=y
+CONFIG_LXT_PHY=y
+CONFIG_CICADA_PHY=y
+CONFIG_VITESSE_PHY=y
+CONFIG_SMSC_PHY=y
+CONFIG_BROADCOM_PHY=y
+CONFIG_ICPLUS_PHY=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_CONSOLE_TRANSLATIONS is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_LEGACY_PTY_COUNT=32
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=2
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+# CONFIG_I2C_COMPAT is not set
+CONFIG_I2C_CHARDEV=y
+# CONFIG_I2C_HELPER_AUTO is not set
+CONFIG_SPI=y
+CONFIG_SENSORS_ADT7475=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_DEBUG=y
+CONFIG_MMC_SPI=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_M41T80=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+# CONFIG_PRINT_QUOTA_WARNING is not set
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_15=y
+CONFIG_NLS_UTF8=y
+# CONFIG_FTRACE is not set
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_ARC4=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
index 84befc968fc4a3aee396b6220b00525cac141d0a..5291505167774d3da7000092e26512c7e29e454d 100644
@@ -2,4 +2,6 @@
 # Makefile for generic prom monitor library routines under Linux.
 #
 
+lib-y                  += cmdline.o
+
 lib-$(CONFIG_64BIT)    += call_o32.o
diff --git a/arch/mips/fw/lib/cmdline.c b/arch/mips/fw/lib/cmdline.c
new file mode 100644
index 0000000..ffd0345
--- /dev/null
@@ -0,0 +1,101 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+#include <asm/addrspace.h>
+#include <asm/fw/fw.h>
+
+int fw_argc;
+int *_fw_argv;
+int *_fw_envp;
+
+void __init fw_init_cmdline(void)
+{
+       int i;
+
+       /* Validate command line parameters. */
+       if ((fw_arg0 >= CKSEG0) || (fw_arg1 < CKSEG0)) {
+               fw_argc = 0;
+               _fw_argv = NULL;
+       } else {
+               fw_argc = (fw_arg0 & 0x0000ffff);
+               _fw_argv = (int *)fw_arg1;
+       }
+
+       /* Validate environment pointer. */
+       if (fw_arg2 < CKSEG0)
+               _fw_envp = NULL;
+       else
+               _fw_envp = (int *)fw_arg2;
+
+       for (i = 1; i < fw_argc; i++) {
+               strlcat(arcs_cmdline, fw_argv(i), COMMAND_LINE_SIZE);
+               if (i < (fw_argc - 1))
+                       strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
+       }
+}
+
+char * __init fw_getcmdline(void)
+{
+       return &(arcs_cmdline[0]);
+}
+
+char *fw_getenv(char *envname)
+{
+       char *result = NULL;
+
+       if (_fw_envp != NULL) {
+               /*
+                * Return a pointer to the given environment variable.
+                * YAMON uses "name", "value" pairs, while U-Boot uses
+                * "name=value".
+                */
+               int i, yamon, index = 0;
+
+               yamon = (strchr(fw_envp(index), '=') == NULL);
+               i = strlen(envname);
+
+               while (fw_envp(index)) {
+                       if (strncmp(envname, fw_envp(index), i) == 0) {
+                               if (yamon) {
+                                       result = fw_envp(index + 1);
+                                       break;
+                               } else if (fw_envp(index)[i] == '=') {
+                                       result = (fw_envp(index + 1) + i);
+                                       break;
+                               }
+                       }
+
+                       /* Increment array index. */
+                       if (yamon)
+                               index += 2;
+                       else
+                               index += 1;
+               }
+       }
+
+       return result;
+}
+
+unsigned long fw_getenvl(char *envname)
+{
+       unsigned long envl = 0UL;
+       char *str;
+       long val;
+       int tmp;
+
+       str = fw_getenv(envname);
+       if (str) {
+               tmp = kstrtol(str, 0, &val);
+               envl = (unsigned long)val;
+       }
+
+       return envl;
+}
index 164a21e65b421de9b7265a1bd403a3895e8092c7..879691d194af426f5532d47c4e467491bf785038 100644
@@ -296,6 +296,7 @@ symbol              =       value
 #define LONG_SUBU      subu
 #define LONG_L         lw
 #define LONG_S         sw
+#define LONG_SP                swp
 #define LONG_SLL       sll
 #define LONG_SLLV      sllv
 #define LONG_SRL       srl
@@ -318,6 +319,7 @@ symbol              =       value
 #define LONG_SUBU      dsubu
 #define LONG_L         ld
 #define LONG_S         sd
+#define LONG_SP                sdp
 #define LONG_SLL       dsll
 #define LONG_SLLV      dsllv
 #define LONG_SRL       dsrl
index b71dd5b160854a0e8f7fb54e4845af0ba535d748..4d2cdea5aa37f46e05e1e06e7f8d378bf9331c13 100644
@@ -104,6 +104,7 @@ struct boot_mem_map {
 extern struct boot_mem_map boot_mem_map;
 
 extern void add_memory_region(phys_t start, phys_t size, long type);
+extern void detect_memory_region(phys_t start, phys_t sz_min,  phys_t sz_max);
 
 extern void prom_init(void);
 extern void prom_free_prom_memory(void);
index 888766ae1f8598f1ff98373c6dbc7a6658307fc8..e28a3e0eb3cb6407b4ad6d6ac649e20a876ab69d 100644
 #include <asm/ptrace.h>
 #include <asm/inst.h>
 
+extern int __isa_exception_epc(struct pt_regs *regs);
+extern int __compute_return_epc(struct pt_regs *regs);
+extern int __compute_return_epc_for_insn(struct pt_regs *regs,
+                                        union mips_instruction insn);
+extern int __microMIPS_compute_return_epc(struct pt_regs *regs);
+extern int __MIPS16e_compute_return_epc(struct pt_regs *regs);
+
+
 static inline int delay_slot(struct pt_regs *regs)
 {
        return regs->cp0_cause & CAUSEF_BD;
@@ -18,20 +26,27 @@ static inline int delay_slot(struct pt_regs *regs)
 
 static inline unsigned long exception_epc(struct pt_regs *regs)
 {
-       if (!delay_slot(regs))
+       if (likely(!delay_slot(regs)))
                return regs->cp0_epc;
 
+       if (get_isa16_mode(regs->cp0_epc))
+               return __isa_exception_epc(regs);
+
        return regs->cp0_epc + 4;
 }
 
 #define BRANCH_LIKELY_TAKEN 0x0001
 
-extern int __compute_return_epc(struct pt_regs *regs);
-extern int __compute_return_epc_for_insn(struct pt_regs *regs,
-                                        union mips_instruction insn);
-
 static inline int compute_return_epc(struct pt_regs *regs)
 {
+       if (get_isa16_mode(regs->cp0_epc)) {
+               if (cpu_has_mmips)
+                       return __microMIPS_compute_return_epc(regs);
+               if (cpu_has_mips16)
+                       return __MIPS16e_compute_return_epc(regs);
+               return regs->cp0_epc;
+       }
+
        if (!delay_slot(regs)) {
                regs->cp0_epc += 4;
                return 0;
@@ -40,4 +55,19 @@ static inline int compute_return_epc(struct pt_regs *regs)
        return __compute_return_epc(regs);
 }
 
+static inline int MIPS16e_compute_return_epc(struct pt_regs *regs,
+                                            union mips16e_instruction *inst)
+{
+       if (likely(!delay_slot(regs))) {
+               if (inst->ri.opcode == MIPS16e_extend_op) {
+                       regs->cp0_epc += 4;
+                       return 0;
+               }
+               regs->cp0_epc += 2;
+               return 0;
+       }
+
+       return __MIPS16e_compute_return_epc(regs);
+}
+
 #endif /* _ASM_BRANCH_H */
index 1a57e8b4d0924b488d0e72b6c79dcb6516ffe53f..e5ec8fcd8afaf9d822167605a8f4aaed22e9486f 100644
 #ifndef cpu_has_pindexed_dcache
 #define cpu_has_pindexed_dcache (cpu_data[0].dcache.flags & MIPS_CACHE_PINDEX)
 #endif
+#ifndef cpu_has_local_ebase
+#define cpu_has_local_ebase    1
+#endif
 
 /*
  * I-Cache snoops remote store.         This only matters on SMP.  Some multiprocessors
diff --git a/arch/mips/include/asm/dma-coherence.h b/arch/mips/include/asm/dma-coherence.h
new file mode 100644 (file)
index 0000000..242cbb3
--- /dev/null
@@ -0,0 +1,15 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006  Ralf Baechle <ralf@linux-mips.org>
+ *
+ */
+#ifndef __ASM_DMA_COHERENCE_H
+#define __ASM_DMA_COHERENCE_H
+
+extern int coherentio;
+extern int hw_coherentio;
+
+#endif
index f8fc74b6cb47430b540d072833d62872192349d8..84238c574d5e6bff1db1db79d58a5ddbd4436422 100644 (file)
@@ -2,6 +2,7 @@
 #define _ASM_DMA_MAPPING_H
 
 #include <asm/scatterlist.h>
+#include <asm/dma-coherence.h>
 #include <asm/cache.h>
 #include <asm-generic/dma-coherent.h>
 
index 3b4092705567b7b4e65dd2680febc6a5a8e540cc..2abb587d5ab40fd41aa950d1987d5f854f60e531 100644 (file)
@@ -54,6 +54,12 @@ do {                                                                 \
 extern int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
        unsigned long cpc);
 extern int do_dsemulret(struct pt_regs *xcp);
+extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
+                                   struct mips_fpu_struct *ctx, int has_fpu,
+                                   void *__user *fault_addr);
+int process_fpemu_return(int sig, void __user *fault_addr);
+int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+                    unsigned long *contpc);
 
 /*
  * Instruction inserted following the badinst to further tag the sequence
diff --git a/arch/mips/include/asm/fw/fw.h b/arch/mips/include/asm/fw/fw.h
new file mode 100644 (file)
index 0000000..d6c50a7
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc.
+ */
+#ifndef __ASM_FW_H_
+#define __ASM_FW_H_
+
+#include <asm/bootinfo.h>      /* For cleaner code... */
+
+enum fw_memtypes {
+       fw_dontuse,
+       fw_code,
+       fw_free,
+};
+
+typedef struct {
+       unsigned long base;     /* Within KSEG0 */
+       unsigned int size;      /* bytes */
+       enum fw_memtypes type;  /* fw_memtypes */
+} fw_memblock_t;
+
+/* Maximum number of memory block descriptors. */
+#define FW_MAX_MEMBLOCKS       32
+
+extern int fw_argc;
+extern int *_fw_argv;
+extern int *_fw_envp;
+
+/*
+ * Most firmware like YAMON, PMON, etc. pass arguments and environment
+ * variables as 32-bit pointers. These take care of sign extension.
+ */
+#define fw_argv(index)         ((char *)(long)_fw_argv[(index)])
+#define fw_envp(index)         ((char *)(long)_fw_envp[(index)])
+
+extern void fw_init_cmdline(void);
+extern char *fw_getcmdline(void);
+extern fw_memblock_t *fw_getmdesc(void);
+extern void fw_meminit(void);
+extern char *fw_getenv(char *name);
+extern unsigned long fw_getenvl(char *name);
+extern void fw_init_early_console(char port);
+
+#endif /* __ASM_FW_H_ */
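fw_argv()/fw_envp() cast the firmware's 32-bit pointer values through (long) so they sign-extend correctly on 64-bit kernels, as the comment above notes. A self-contained demonstration of that cast (the address used is illustrative):

#include <stdio.h>

int main(void)
{
        int raw = 0x80002000;           /* KSEG0 pointer as YAMON stores it: 32 bits, bit 31 set */
        char *p = (char *)(long)raw;    /* the fw_argv()/fw_envp() cast: widen via long first */

        /* The intermediate (long) cast sign-extends the negative 32-bit value,
         * so a 64-bit build ends up with a canonical 0xffffffff80002000 address. */
        printf("%p\n", (void *)p);
        return 0;
}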
index bdc9786ab5a7bcbf278b3fe1f77a73ae49dd2767..7153b32de18e6692e2792be19a34b886d1121ba8 100644 (file)
 #define GIC_VPE_WD_COUNT0_OFS          0x0094
 #define GIC_VPE_WD_INITIAL0_OFS                0x0098
 #define GIC_VPE_COMPARE_LO_OFS         0x00a0
-#define GIC_VPE_COMPARE_HI             0x00a4
+#define GIC_VPE_COMPARE_HI_OFS         0x00a4
 
 #define GIC_VPE_EIC_SHADOW_SET_BASE    0x0100
 #define GIC_VPE_EIC_SS(intr) \
@@ -359,7 +359,11 @@ struct gic_shared_intr_map {
 /* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */
 #define GIC_PIN_TO_VEC_OFFSET  (1)
 
-extern int gic_present;
+#include <linux/clocksource.h>
+#include <linux/irq.h>
+
+extern unsigned int gic_present;
+extern unsigned int gic_frequency;
 extern unsigned long _gic_base;
 extern unsigned int gic_irq_base;
 extern unsigned int gic_irq_flags[];
@@ -368,18 +372,20 @@ extern struct gic_shared_intr_map gic_shared_intr_map[];
 extern void gic_init(unsigned long gic_base_addr,
        unsigned long gic_addrspace_size, struct gic_intr_map *intrmap,
        unsigned int intrmap_size, unsigned int irqbase);
-
 extern void gic_clocksource_init(unsigned int);
-extern unsigned int gic_get_int(void);
+extern unsigned int gic_compare_int (void);
+extern cycle_t gic_read_count(void);
+extern cycle_t gic_read_compare(void);
+extern void gic_write_compare(cycle_t cnt);
 extern void gic_send_ipi(unsigned int intr);
 extern unsigned int plat_ipi_call_int_xlate(unsigned int);
 extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
 extern void gic_bind_eic_interrupt(int irq, int set);
 extern unsigned int gic_get_timer_pending(void);
+extern unsigned int gic_get_int(void);
 extern void gic_enable_interrupt(int irq_vec);
 extern void gic_disable_interrupt(int irq_vec);
 extern void gic_irq_ack(struct irq_data *d);
 extern void gic_finish_irq(struct irq_data *d);
 extern void gic_platform_init(int irqs, struct irq_chip *irq_controller);
-
 #endif /* _ASM_GICREGS_H */
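The added gic_read_count()/gic_write_compare() accessors are the building blocks for a GIC-based clock event device. A hedged sketch of a set_next_event handler built on them; the function name, structure, and error handling here are illustrative, not taken from this patch:

#include <linux/clockchips.h>
#include <linux/errno.h>
#include <asm/gic.h>

static int gic_next_event(unsigned long delta, struct clock_event_device *evt)
{
        cycle_t cnt = gic_read_count() + delta;

        gic_write_compare(cnt);
        /* If the count already passed the programmed compare value, report -ETIME. */
        return ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0;
}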
index 44d6a5bde4a1cb9e8db930a11d1b07b23f383507..e3ee92d4dbe750c7aa05a5488f7443cdd64fb387 100644 (file)
 #ifndef _ASM_HAZARDS_H
 #define _ASM_HAZARDS_H
 
-#ifdef __ASSEMBLY__
-#define ASMMACRO(name, code...) .macro name; code; .endm
-#else
-
-#include <asm/cpu-features.h>
-
-#define ASMMACRO(name, code...)                                                \
-__asm__(".macro " #name "; " #code "; .endm");                         \
-                                                                       \
-static inline void name(void)                                          \
-{                                                                      \
-       __asm__ __volatile__ (#name);                                   \
-}
-
-/*
- * MIPS R2 instruction hazard barrier.  Needs to be called as a subroutine.
- */
-extern void mips_ihb(void);
-
-#endif
+#include <linux/stringify.h>
 
-ASMMACRO(_ssnop,
-        sll    $0, $0, 1
-       )
+#define ___ssnop                                                       \
+       sll     $0, $0, 1
 
-ASMMACRO(_ehb,
-        sll    $0, $0, 3
-       )
+#define ___ehb                                                         \
+       sll     $0, $0, 3
 
 /*
  * TLB hazards
@@ -48,24 +27,24 @@ ASMMACRO(_ehb,
  * MIPSR2 defines ehb for hazard avoidance
  */
 
-ASMMACRO(mtc0_tlbw_hazard,
-        _ehb
-       )
-ASMMACRO(tlbw_use_hazard,
-        _ehb
-       )
-ASMMACRO(tlb_probe_hazard,
-        _ehb
-       )
-ASMMACRO(irq_enable_hazard,
-        _ehb
-       )
-ASMMACRO(irq_disable_hazard,
-       _ehb
-       )
-ASMMACRO(back_to_back_c0_hazard,
-        _ehb
-       )
+#define __mtc0_tlbw_hazard                                             \
+       ___ehb
+
+#define __tlbw_use_hazard                                              \
+       ___ehb
+
+#define __tlb_probe_hazard                                             \
+       ___ehb
+
+#define __irq_enable_hazard                                            \
+       ___ehb
+
+#define __irq_disable_hazard                                           \
+       ___ehb
+
+#define __back_to_back_c0_hazard                                       \
+       ___ehb
+
 /*
  * gcc has a tradition of misscompiling the previous construct using the
  * address of a label as argument to inline assembler. Gas otoh has the
@@ -94,24 +73,42 @@ do {                                                                        \
  * These are slightly complicated by the fact that we guarantee R1 kernels to
  * run fine on R2 processors.
  */
-ASMMACRO(mtc0_tlbw_hazard,
-       _ssnop; _ssnop; _ehb
-       )
-ASMMACRO(tlbw_use_hazard,
-       _ssnop; _ssnop; _ssnop; _ehb
-       )
-ASMMACRO(tlb_probe_hazard,
-        _ssnop; _ssnop; _ssnop; _ehb
-       )
-ASMMACRO(irq_enable_hazard,
-        _ssnop; _ssnop; _ssnop; _ehb
-       )
-ASMMACRO(irq_disable_hazard,
-       _ssnop; _ssnop; _ssnop; _ehb
-       )
-ASMMACRO(back_to_back_c0_hazard,
-        _ssnop; _ssnop; _ssnop; _ehb
-       )
+
+#define __mtc0_tlbw_hazard                                             \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ehb
+
+#define __tlbw_use_hazard                                              \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ehb
+
+#define __tlb_probe_hazard                                             \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ehb
+
+#define __irq_enable_hazard                                            \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ehb
+
+#define __irq_disable_hazard                                           \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ehb
+
+#define __back_to_back_c0_hazard                                       \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ehb
+
 /*
  * gcc has a tradition of misscompiling the previous construct using the
  * address of a label as argument to inline assembler. Gas otoh has the
@@ -147,18 +144,18 @@ do {                                                                      \
  * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
  */
 
-ASMMACRO(mtc0_tlbw_hazard,
-       )
-ASMMACRO(tlbw_use_hazard,
-       )
-ASMMACRO(tlb_probe_hazard,
-       )
-ASMMACRO(irq_enable_hazard,
-       )
-ASMMACRO(irq_disable_hazard,
-       )
-ASMMACRO(back_to_back_c0_hazard,
-       )
+#define __mtc0_tlbw_hazard
+
+#define __tlbw_use_hazard
+
+#define __tlb_probe_hazard
+
+#define __irq_enable_hazard
+
+#define __irq_disable_hazard
+
+#define __back_to_back_c0_hazard
+
 #define instruction_hazard() do { } while (0)
 
 #elif defined(CONFIG_CPU_SB1)
@@ -166,19 +163,21 @@ ASMMACRO(back_to_back_c0_hazard,
 /*
  * Mostly like R4000 for historic reasons
  */
-ASMMACRO(mtc0_tlbw_hazard,
-       )
-ASMMACRO(tlbw_use_hazard,
-       )
-ASMMACRO(tlb_probe_hazard,
-       )
-ASMMACRO(irq_enable_hazard,
-       )
-ASMMACRO(irq_disable_hazard,
-        _ssnop; _ssnop; _ssnop
-       )
-ASMMACRO(back_to_back_c0_hazard,
-       )
+#define __mtc0_tlbw_hazard
+
+#define __tlbw_use_hazard
+
+#define __tlb_probe_hazard
+
+#define __irq_enable_hazard
+
+#define __irq_disable_hazard                                           \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ssnop
+
+#define __back_to_back_c0_hazard
+
 #define instruction_hazard() do { } while (0)
 
 #else
@@ -192,24 +191,35 @@ ASMMACRO(back_to_back_c0_hazard,
  * hazard so this is nice trick to have an optimal code for a range of
  * processors.
  */
-ASMMACRO(mtc0_tlbw_hazard,
-       nop; nop
-       )
-ASMMACRO(tlbw_use_hazard,
-       nop; nop; nop
-       )
-ASMMACRO(tlb_probe_hazard,
-        nop; nop; nop
-       )
-ASMMACRO(irq_enable_hazard,
-        _ssnop; _ssnop; _ssnop;
-       )
-ASMMACRO(irq_disable_hazard,
-       nop; nop; nop
-       )
-ASMMACRO(back_to_back_c0_hazard,
-        _ssnop; _ssnop; _ssnop;
-       )
+#define __mtc0_tlbw_hazard                                             \
+       nop;                                                            \
+       nop
+
+#define __tlbw_use_hazard                                              \
+       nop;                                                            \
+       nop;                                                            \
+       nop
+
+#define __tlb_probe_hazard                                             \
+       nop;                                                            \
+       nop;                                                            \
+       nop
+
+#define __irq_enable_hazard                                            \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ssnop
+
+#define __irq_disable_hazard                                           \
+       nop;                                                            \
+       nop;                                                            \
+       nop
+
+#define __back_to_back_c0_hazard                                       \
+       ___ssnop;                                                       \
+       ___ssnop;                                                       \
+       ___ssnop
+
 #define instruction_hazard() do { } while (0)
 
 #endif
@@ -218,32 +228,137 @@ ASMMACRO(back_to_back_c0_hazard,
 /* FPU hazards */
 
 #if defined(CONFIG_CPU_SB1)
-ASMMACRO(enable_fpu_hazard,
-        .set   push;
-        .set   mips64;
-        .set   noreorder;
-        _ssnop;
-        bnezl  $0, .+4;
-        _ssnop;
-        .set   pop
-)
-ASMMACRO(disable_fpu_hazard,
-)
+
+#define __enable_fpu_hazard                                            \
+       .set    push;                                                   \
+       .set    mips64;                                                 \
+       .set    noreorder;                                              \
+       ___ssnop;                                                       \
+       bnezl   $0, .+4;                                                \
+       ___ssnop;                                                       \
+       .set    pop
+
+#define __disable_fpu_hazard
 
 #elif defined(CONFIG_CPU_MIPSR2)
-ASMMACRO(enable_fpu_hazard,
-        _ehb
-)
-ASMMACRO(disable_fpu_hazard,
-        _ehb
-)
+
+#define __enable_fpu_hazard                                            \
+       ___ehb
+
+#define __disable_fpu_hazard                                           \
+       ___ehb
+
 #else
-ASMMACRO(enable_fpu_hazard,
-        nop; nop; nop; nop
-)
-ASMMACRO(disable_fpu_hazard,
-        _ehb
-)
+
+#define __enable_fpu_hazard                                            \
+       nop;                                                            \
+       nop;                                                            \
+       nop;                                                            \
+       nop
+
+#define __disable_fpu_hazard                                           \
+       ___ehb
+
 #endif
 
+#ifdef __ASSEMBLY__
+
+#define _ssnop ___ssnop
+#define        _ehb ___ehb
+#define mtc0_tlbw_hazard __mtc0_tlbw_hazard
+#define tlbw_use_hazard __tlbw_use_hazard
+#define tlb_probe_hazard __tlb_probe_hazard
+#define irq_enable_hazard __irq_enable_hazard
+#define irq_disable_hazard __irq_disable_hazard
+#define back_to_back_c0_hazard __back_to_back_c0_hazard
+#define enable_fpu_hazard __enable_fpu_hazard
+#define disable_fpu_hazard __disable_fpu_hazard
+
+#else
+
+#define _ssnop()                                                       \
+do {                                                                   \
+       __asm__ __volatile__(                                           \
+       __stringify(___ssnop)                                           \
+       );                                                              \
+} while (0)
+
+#define        _ehb()                                                          \
+do {                                                                   \
+       __asm__ __volatile__(                                           \
+       __stringify(___ehb)                                             \
+       );                                                              \
+} while (0)
+
+
+#define mtc0_tlbw_hazard()                                             \
+do {                                                                   \
+       __asm__ __volatile__(                                           \
+       __stringify(__mtc0_tlbw_hazard)                                 \
+       );                                                              \
+} while (0)
+
+
+#define tlbw_use_hazard()                                              \
+do {                                                                   \
+       __asm__ __volatile__(                                           \
+       __stringify(__tlbw_use_hazard)                                  \
+       );                                                              \
+} while (0)
+
+
+#define tlb_probe_hazard()                                             \
+do {                                                                   \
+       __asm__ __volatile__(                                           \
+       __stringify(__tlb_probe_hazard)                                 \
+       );                                                              \
+} while (0)
+
+
+#define irq_enable_hazard()                                            \
+do {                                                                   \
+       __asm__ __volatile__(                                           \
+       __stringify(__irq_enable_hazard)                                \
+       );                                                              \
+} while (0)
+
+
+#define irq_disable_hazard()                                           \
+do {                                                                   \
+       __asm__ __volatile__(                                           \
+       __stringify(__irq_disable_hazard)                               \
+       );                                                              \
+} while (0)
+
+
+#define back_to_back_c0_hazard()                                       \
+do {                                                                   \
+       __asm__ __volatile__(                                           \
+       __stringify(__back_to_back_c0_hazard)                           \
+       );                                                              \
+} while (0)
+
+
+#define enable_fpu_hazard()                                            \
+do {                                                                   \
+       __asm__ __volatile__(                                           \
+       __stringify(__enable_fpu_hazard)                                \
+       );                                                              \
+} while (0)
+
+
+#define disable_fpu_hazard()                                           \
+do {                                                                   \
+       __asm__ __volatile__(                                           \
+       __stringify(__disable_fpu_hazard)                               \
+       );                                                              \
+} while (0)
+
+/*
+ * MIPS R2 instruction hazard barrier.   Needs to be called as a subroutine.
+ */
+extern void mips_ihb(void);
+
+#endif /* __ASSEMBLY__  */
+
 #endif /* _ASM_HAZARDS_H */
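The rewrite drops the ASMMACRO() wrappers in favour of plain text macros that assembly sources use directly and C code stringifies into inline asm. A minimal sketch of that single-definition pattern, reproducing the usual __stringify() helper from <linux/stringify.h> so it stands alone (a MIPS toolchain is assumed):

/* Sketch only: one assembler text body serves .S files verbatim and C code
 * via inline asm. */
#define __stringify_1(x...)     #x
#define __stringify(x...)       __stringify_1(x)

#define ___ssnop        sll     $0, $0, 1       /* usable as-is from assembly sources */

static inline void _ssnop(void)                 /* C side: stringify the same body */
{
        __asm__ __volatile__(__stringify(___ssnop));
}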
index f1eadf764071d1727af40d22a1476a378fc5ad1d..22912f78401c2f6b2a5d8b491146a485a84f1106 100644 (file)
 
 typedef unsigned int mips_instruction;
 
+/* microMIPS instruction decode structure. Do NOT export!!! */
+struct mm_decoded_insn {
+       mips_instruction insn;
+       mips_instruction next_insn;
+       int pc_inc;
+       int next_pc_inc;
+       int micro_mips_mode;
+};
+
+/* Recode table from 16-bit register notation to 32-bit GPR. Do NOT export!!! */
+extern const int reg16to32[];
+
 #endif /* _ASM_INST_H */
index 9f3384c789d7bfdff5511706b0dffc3d2962c96c..45c00951888b4661ecbcf93bcc46fff33f9a9555 100644 (file)
 #ifndef __ASSEMBLY__
 
 #include <linux/compiler.h>
+#include <linux/stringify.h>
 #include <asm/hazards.h>
 
 #if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
 
-__asm__(
-       "       .macro  arch_local_irq_disable\n"
+static inline void arch_local_irq_disable(void)
+{
+       __asm__ __volatile__(
        "       .set    push                                            \n"
        "       .set    noat                                            \n"
        "       di                                                      \n"
-       "       irq_disable_hazard                                      \n"
+       "       " __stringify(__irq_disable_hazard) "                   \n"
        "       .set    pop                                             \n"
-       "       .endm                                                   \n");
-
-static inline void arch_local_irq_disable(void)
-{
-       __asm__ __volatile__(
-               "arch_local_irq_disable"
-               : /* no outputs */
-               : /* no inputs */
-               : "memory");
+       : /* no outputs */
+       : /* no inputs */
+       : "memory");
 }
 
+static inline unsigned long arch_local_irq_save(void)
+{
+       unsigned long flags;
 
-__asm__(
-       "       .macro  arch_local_irq_save result                      \n"
+       asm __volatile__(
        "       .set    push                                            \n"
        "       .set    reorder                                         \n"
        "       .set    noat                                            \n"
-       "       di      \\result                                        \n"
-       "       andi    \\result, 1                                     \n"
-       "       irq_disable_hazard                                      \n"
+       "       di      %[flags]                                        \n"
+       "       andi    %[flags], 1                                     \n"
+       "       " __stringify(__irq_disable_hazard) "                   \n"
        "       .set    pop                                             \n"
-       "       .endm                                                   \n");
+       : [flags] "=r" (flags)
+       : /* no inputs */
+       : "memory");
 
-static inline unsigned long arch_local_irq_save(void)
-{
-       unsigned long flags;
-       asm volatile("arch_local_irq_save\t%0"
-                    : "=r" (flags)
-                    : /* no inputs */
-                    : "memory");
        return flags;
 }
 
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+       unsigned long __tmp1;
 
-__asm__(
-       "       .macro  arch_local_irq_restore flags                    \n"
+       __asm__ __volatile__(
        "       .set    push                                            \n"
        "       .set    noreorder                                       \n"
        "       .set    noat                                            \n"
@@ -69,7 +64,7 @@ __asm__(
         * Slow, but doesn't suffer from a relatively unlikely race
         * condition we're having since days 1.
         */
-       "       beqz    \\flags, 1f                                     \n"
+       "       beqz    %[flags], 1f                                    \n"
        "       di                                                      \n"
        "       ei                                                      \n"
        "1:                                                             \n"
@@ -78,33 +73,44 @@ __asm__(
         * Fast, dangerous.  Life is fun, life is good.
         */
        "       mfc0    $1, $12                                         \n"
-       "       ins     $1, \\flags, 0, 1                               \n"
+       "       ins     $1, %[flags], 0, 1                              \n"
        "       mtc0    $1, $12                                         \n"
 #endif
-       "       irq_disable_hazard                                      \n"
+       "       " __stringify(__irq_disable_hazard) "                   \n"
        "       .set    pop                                             \n"
-       "       .endm                                                   \n");
-
-static inline void arch_local_irq_restore(unsigned long flags)
-{
-       unsigned long __tmp1;
-
-       __asm__ __volatile__(
-               "arch_local_irq_restore\t%0"
-               : "=r" (__tmp1)
-               : "0" (flags)
-               : "memory");
+       : [flags] "=r" (__tmp1)
+       : "0" (flags)
+       : "memory");
 }
 
 static inline void __arch_local_irq_restore(unsigned long flags)
 {
-       unsigned long __tmp1;
-
        __asm__ __volatile__(
-               "arch_local_irq_restore\t%0"
-               : "=r" (__tmp1)
-               : "0" (flags)
-               : "memory");
+       "       .set    push                                            \n"
+       "       .set    noreorder                                       \n"
+       "       .set    noat                                            \n"
+#if defined(CONFIG_IRQ_CPU)
+       /*
+        * Slow, but doesn't suffer from a relatively unlikely race
+        * condition we're having since days 1.
+        */
+       "       beqz    %[flags], 1f                                    \n"
+       "       di                                                      \n"
+       "       ei                                                      \n"
+       "1:                                                             \n"
+#else
+       /*
+        * Fast, dangerous.  Life is fun, life is good.
+        */
+       "       mfc0    $1, $12                                         \n"
+       "       ins     $1, %[flags], 0, 1                              \n"
+       "       mtc0    $1, $12                                         \n"
+#endif
+       "       " __stringify(__irq_disable_hazard) "                   \n"
+       "       .set    pop                                             \n"
+       : [flags] "=r" (flags)
+       : "0" (flags)
+       : "memory");
 }
 #else
 /* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
@@ -115,8 +121,18 @@ void __arch_local_irq_restore(unsigned long flags);
 #endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
 
 
-__asm__(
-       "       .macro  arch_local_irq_enable                           \n"
+extern void smtc_ipi_replay(void);
+
+static inline void arch_local_irq_enable(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+       /*
+        * SMTC kernel needs to do a software replay of queued
+        * IPIs, at the cost of call overhead on each local_irq_enable()
+        */
+       smtc_ipi_replay();
+#endif
+       __asm__ __volatile__(
        "       .set    push                                            \n"
        "       .set    reorder                                         \n"
        "       .set    noat                                            \n"
@@ -133,45 +149,28 @@ __asm__(
        "       xori    $1,0x1e                                         \n"
        "       mtc0    $1,$12                                          \n"
 #endif
-       "       irq_enable_hazard                                       \n"
+       "       " __stringify(__irq_enable_hazard) "                    \n"
        "       .set    pop                                             \n"
-       "       .endm");
-
-extern void smtc_ipi_replay(void);
-
-static inline void arch_local_irq_enable(void)
-{
-#ifdef CONFIG_MIPS_MT_SMTC
-       /*
-        * SMTC kernel needs to do a software replay of queued
-        * IPIs, at the cost of call overhead on each local_irq_enable()
-        */
-       smtc_ipi_replay();
-#endif
-       __asm__ __volatile__(
-               "arch_local_irq_enable"
-               : /* no outputs */
-               : /* no inputs */
-               : "memory");
+       : /* no outputs */
+       : /* no inputs */
+       : "memory");
 }
 
+static inline unsigned long arch_local_save_flags(void)
+{
+       unsigned long flags;
 
-__asm__(
-       "       .macro  arch_local_save_flags flags                     \n"
+       asm __volatile__(
        "       .set    push                                            \n"
        "       .set    reorder                                         \n"
 #ifdef CONFIG_MIPS_MT_SMTC
-       "       mfc0    \\flags, $2, 1                                  \n"
+       "       mfc0    %[flags], $2, 1                                 \n"
 #else
-       "       mfc0    \\flags, $12                                    \n"
+       "       mfc0    %[flags], $12                                   \n"
 #endif
        "       .set    pop                                             \n"
-       "       .endm                                                   \n");
+       : [flags] "=r" (flags));
 
-static inline unsigned long arch_local_save_flags(void)
-{
-       unsigned long flags;
-       asm volatile("arch_local_save_flags %0" : "=r" (flags));
        return flags;
 }
 
diff --git a/arch/mips/include/asm/kvm.h b/arch/mips/include/asm/kvm.h
new file mode 100644 (file)
index 0000000..85789ea
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#ifndef __LINUX_KVM_MIPS_H
+#define __LINUX_KVM_MIPS_H
+
+#include <linux/types.h>
+
+#define __KVM_MIPS
+
+#define N_MIPS_COPROC_REGS      32
+#define N_MIPS_COPROC_SEL      8
+
+/* for KVM_GET_REGS and KVM_SET_REGS */
+struct kvm_regs {
+       __u32 gprs[32];
+       __u32 hi;
+       __u32 lo;
+       __u32 pc;
+
+       __u32 cp0reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
+};
+
+/* for KVM_GET_SREGS and KVM_SET_SREGS */
+struct kvm_sregs {
+};
+
+/* for KVM_GET_FPU and KVM_SET_FPU */
+struct kvm_fpu {
+};
+
+struct kvm_debug_exit_arch {
+};
+
+/* for KVM_SET_GUEST_DEBUG */
+struct kvm_guest_debug_arch {
+};
+
+struct kvm_mips_interrupt {
+       /* in */
+       __u32 cpu;
+       __u32 irq;
+};
+
+/* definition of registers in kvm_run */
+struct kvm_sync_regs {
+};
+
+#endif /* __LINUX_KVM_MIPS_H */
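The new struct kvm_regs is what userspace sees through the standard KVM_GET_REGS/KVM_SET_REGS ioctls. A hedged sketch of a userspace reader; vcpu_fd is assumed to be an already-created KVM vCPU descriptor, and dump_guest_pc() is an illustrative name:

#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

/* Sketch only: print the guest program counter from the MIPS kvm_regs layout. */
static void dump_guest_pc(int vcpu_fd)
{
        struct kvm_regs regs;

        if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) == 0)
                printf("guest pc = 0x%x\n", regs.pc);
        else
                perror("KVM_GET_REGS");
}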
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
new file mode 100644 (file)
index 0000000..e68781e
--- /dev/null
@@ -0,0 +1,667 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#ifndef __MIPS_KVM_HOST_H__
+#define __MIPS_KVM_HOST_H__
+
+#include <linux/mutex.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/kvm.h>
+#include <linux/kvm_types.h>
+#include <linux/threads.h>
+#include <linux/spinlock.h>
+
+
+#define KVM_MAX_VCPUS          1
+#define KVM_USER_MEM_SLOTS     8
+/* memory slots that are not exposed to userspace */
+#define KVM_PRIVATE_MEM_SLOTS  0
+
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+
+/* Don't support huge pages */
+#define KVM_HPAGE_GFN_SHIFT(x) 0
+
+/* We don't currently support large pages. */
+#define KVM_NR_PAGE_SIZES      1
+#define KVM_PAGES_PER_HPAGE(x) 1
+
+
+
+/* Special address that contains the comm page, used for reducing # of traps */
+#define KVM_GUEST_COMMPAGE_ADDR     0x0
+
+#define KVM_GUEST_KERNEL_MODE(vcpu)    ((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \
+                                       ((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0))
+
+#define KVM_GUEST_KUSEG             0x00000000UL
+#define KVM_GUEST_KSEG0             0x40000000UL
+#define KVM_GUEST_KSEG23            0x60000000UL
+#define KVM_GUEST_KSEGX(a)          ((_ACAST32_(a)) & 0x60000000)
+#define KVM_GUEST_CPHYSADDR(a)      ((_ACAST32_(a)) & 0x1fffffff)
+
+#define KVM_GUEST_CKSEG0ADDR(a)                (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
+#define KVM_GUEST_CKSEG1ADDR(a)                (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
+#define KVM_GUEST_CKSEG23ADDR(a)       (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
+
+/*
+ * Map an address to a certain kernel segment
+ */
+#define KVM_GUEST_KSEG0ADDR(a)         (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
+#define KVM_GUEST_KSEG1ADDR(a)         (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
+#define KVM_GUEST_KSEG23ADDR(a)                (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
+
+#define KVM_INVALID_PAGE            0xdeadbeef
+#define KVM_INVALID_INST            0xdeadbeef
+#define KVM_INVALID_ADDR            0xdeadbeef
+
+#define KVM_MALTA_GUEST_RTC_ADDR    0xb8000070UL
+
+#define GUEST_TICKS_PER_JIFFY (40000000/HZ)
+#define MS_TO_NS(x) (x * 1E6L)
+
+#define CAUSEB_DC       27
+#define CAUSEF_DC       (_ULCAST_(1)   << 27)
+
+struct kvm;
+struct kvm_run;
+struct kvm_vcpu;
+struct kvm_interrupt;
+
+extern atomic_t kvm_mips_instance;
+extern pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn);
+extern void (*kvm_mips_release_pfn_clean) (pfn_t pfn);
+extern bool(*kvm_mips_is_error_pfn) (pfn_t pfn);
+
+struct kvm_vm_stat {
+       u32 remote_tlb_flush;
+};
+
+struct kvm_vcpu_stat {
+       u32 wait_exits;
+       u32 cache_exits;
+       u32 signal_exits;
+       u32 int_exits;
+       u32 cop_unusable_exits;
+       u32 tlbmod_exits;
+       u32 tlbmiss_ld_exits;
+       u32 tlbmiss_st_exits;
+       u32 addrerr_st_exits;
+       u32 addrerr_ld_exits;
+       u32 syscall_exits;
+       u32 resvd_inst_exits;
+       u32 break_inst_exits;
+       u32 flush_dcache_exits;
+       u32 halt_wakeup;
+};
+
+enum kvm_mips_exit_types {
+       WAIT_EXITS,
+       CACHE_EXITS,
+       SIGNAL_EXITS,
+       INT_EXITS,
+       COP_UNUSABLE_EXITS,
+       TLBMOD_EXITS,
+       TLBMISS_LD_EXITS,
+       TLBMISS_ST_EXITS,
+       ADDRERR_ST_EXITS,
+       ADDRERR_LD_EXITS,
+       SYSCALL_EXITS,
+       RESVD_INST_EXITS,
+       BREAK_INST_EXITS,
+       FLUSH_DCACHE_EXITS,
+       MAX_KVM_MIPS_EXIT_TYPES
+};
+
+struct kvm_arch_memory_slot {
+};
+
+struct kvm_arch {
+       /* Guest GVA->HPA page table */
+       unsigned long *guest_pmap;
+       unsigned long guest_pmap_npages;
+
+       /* Wired host TLB used for the commpage */
+       int commpage_tlb;
+};
+
+#define N_MIPS_COPROC_REGS      32
+#define N_MIPS_COPROC_SEL      8
+
+struct mips_coproc {
+       unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+       unsigned long stat[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
+#endif
+};
+
+/*
+ * Coprocessor 0 register names
+ */
+#define        MIPS_CP0_TLB_INDEX          0
+#define        MIPS_CP0_TLB_RANDOM         1
+#define        MIPS_CP0_TLB_LOW            2
+#define        MIPS_CP0_TLB_LO0            2
+#define        MIPS_CP0_TLB_LO1            3
+#define        MIPS_CP0_TLB_CONTEXT    4
+#define        MIPS_CP0_TLB_PG_MASK    5
+#define        MIPS_CP0_TLB_WIRED          6
+#define        MIPS_CP0_HWRENA             7
+#define        MIPS_CP0_BAD_VADDR          8
+#define        MIPS_CP0_COUNT          9
+#define        MIPS_CP0_TLB_HI         10
+#define        MIPS_CP0_COMPARE            11
+#define        MIPS_CP0_STATUS         12
+#define        MIPS_CP0_CAUSE          13
+#define        MIPS_CP0_EXC_PC         14
+#define        MIPS_CP0_PRID               15
+#define        MIPS_CP0_CONFIG         16
+#define        MIPS_CP0_LLADDR         17
+#define        MIPS_CP0_WATCH_LO           18
+#define        MIPS_CP0_WATCH_HI           19
+#define        MIPS_CP0_TLB_XCONTEXT   20
+#define        MIPS_CP0_ECC                26
+#define        MIPS_CP0_CACHE_ERR          27
+#define        MIPS_CP0_TAG_LO         28
+#define        MIPS_CP0_TAG_HI         29
+#define        MIPS_CP0_ERROR_PC           30
+#define        MIPS_CP0_DEBUG          23
+#define        MIPS_CP0_DEPC               24
+#define        MIPS_CP0_PERFCNT            25
+#define        MIPS_CP0_ERRCTL         26
+#define        MIPS_CP0_DATA_LO            28
+#define        MIPS_CP0_DATA_HI            29
+#define        MIPS_CP0_DESAVE         31
+
+#define MIPS_CP0_CONFIG_SEL        0
+#define MIPS_CP0_CONFIG1_SEL    1
+#define MIPS_CP0_CONFIG2_SEL    2
+#define MIPS_CP0_CONFIG3_SEL    3
+
+/* Config0 register bits */
+#define CP0C0_M    31
+#define CP0C0_K23  28
+#define CP0C0_KU   25
+#define CP0C0_MDU  20
+#define CP0C0_MM   17
+#define CP0C0_BM   16
+#define CP0C0_BE   15
+#define CP0C0_AT   13
+#define CP0C0_AR   10
+#define CP0C0_MT   7
+#define CP0C0_VI   3
+#define CP0C0_K0   0
+
+/* Config1 register bits */
+#define CP0C1_M    31
+#define CP0C1_MMU  25
+#define CP0C1_IS   22
+#define CP0C1_IL   19
+#define CP0C1_IA   16
+#define CP0C1_DS   13
+#define CP0C1_DL   10
+#define CP0C1_DA   7
+#define CP0C1_C2   6
+#define CP0C1_MD   5
+#define CP0C1_PC   4
+#define CP0C1_WR   3
+#define CP0C1_CA   2
+#define CP0C1_EP   1
+#define CP0C1_FP   0
+
+/* Config2 Register bits */
+#define CP0C2_M    31
+#define CP0C2_TU   28
+#define CP0C2_TS   24
+#define CP0C2_TL   20
+#define CP0C2_TA   16
+#define CP0C2_SU   12
+#define CP0C2_SS   8
+#define CP0C2_SL   4
+#define CP0C2_SA   0
+
+/* Config3 Register bits */
+#define CP0C3_M    31
+#define CP0C3_ISA_ON_EXC 16
+#define CP0C3_ULRI  13
+#define CP0C3_DSPP 10
+#define CP0C3_LPA  7
+#define CP0C3_VEIC 6
+#define CP0C3_VInt 5
+#define CP0C3_SP   4
+#define CP0C3_MT   2
+#define CP0C3_SM   1
+#define CP0C3_TL   0
+
+/* Have config1, Cacheable, noncoherent, write-back, write allocate*/
+#define MIPS_CONFIG0                                              \
+  ((1 << CP0C0_M) | (0x3 << CP0C0_K0))
+
+/* Have config2, no coprocessor2 attached, no MDMX support attached,
+   no performance counters, watch registers present,
+   no code compression, EJTAG present, no FPU, no watch registers */
+#define MIPS_CONFIG1                                              \
+((1 << CP0C1_M) |                                                 \
+ (0 << CP0C1_C2) | (0 << CP0C1_MD) | (0 << CP0C1_PC) |            \
+ (0 << CP0C1_WR) | (0 << CP0C1_CA) | (1 << CP0C1_EP) |            \
+ (0 << CP0C1_FP))
+
+/* Have config3, no tertiary/secondary caches implemented */
+#define MIPS_CONFIG2                                              \
+((1 << CP0C2_M))
+
+/* No config4, no DSP ASE, no large physaddr (PABITS),
+   no external interrupt controller, no vectored interrupts,
+   no 1kb pages, no SmartMIPS ASE, no trace logic */
+#define MIPS_CONFIG3                                              \
+((0 << CP0C3_M) | (0 << CP0C3_DSPP) | (0 << CP0C3_LPA) |          \
+ (0 << CP0C3_VEIC) | (0 << CP0C3_VInt) | (0 << CP0C3_SP) |        \
+ (0 << CP0C3_SM) | (0 << CP0C3_TL))
+
+/* MMU types, the first four entries have the same layout as the
+   CP0C0_MT field.  */
+enum mips_mmu_types {
+       MMU_TYPE_NONE,
+       MMU_TYPE_R4000,
+       MMU_TYPE_RESERVED,
+       MMU_TYPE_FMT,
+       MMU_TYPE_R3000,
+       MMU_TYPE_R6000,
+       MMU_TYPE_R8000
+};
+
+/*
+ * Trap codes
+ */
+#define T_INT           0      /* Interrupt pending */
+#define T_TLB_MOD       1      /* TLB modified fault */
+#define T_TLB_LD_MISS       2  /* TLB miss on load or ifetch */
+#define T_TLB_ST_MISS       3  /* TLB miss on a store */
+#define T_ADDR_ERR_LD       4  /* Address error on a load or ifetch */
+#define T_ADDR_ERR_ST       5  /* Address error on a store */
+#define T_BUS_ERR_IFETCH    6  /* Bus error on an ifetch */
+#define T_BUS_ERR_LD_ST     7  /* Bus error on a load or store */
+#define T_SYSCALL       8      /* System call */
+#define T_BREAK         9      /* Breakpoint */
+#define T_RES_INST      10     /* Reserved instruction exception */
+#define T_COP_UNUSABLE      11 /* Coprocessor unusable */
+#define T_OVFLOW        12     /* Arithmetic overflow */
+
+/*
+ * Trap definitions added for r4000 port.
+ */
+#define T_TRAP          13     /* Trap instruction */
+#define T_VCEI          14     /* Virtual coherency exception */
+#define T_FPE           15     /* Floating point exception */
+#define T_WATCH         23     /* Watch address reference */
+#define T_VCED          31     /* Virtual coherency data */
+
+/* Resume Flags */
+#define RESUME_FLAG_DR          (1<<0) /* Reload guest nonvolatile state? */
+#define RESUME_FLAG_HOST        (1<<1) /* Resume host? */
+
+#define RESUME_GUEST            0
+#define RESUME_GUEST_DR         RESUME_FLAG_DR
+#define RESUME_HOST             RESUME_FLAG_HOST
+
+enum emulation_result {
+       EMULATE_DONE,           /* no further processing */
+       EMULATE_DO_MMIO,        /* kvm_run filled with MMIO request */
+       EMULATE_FAIL,           /* can't emulate this instruction */
+       EMULATE_WAIT,           /* WAIT instruction */
+       EMULATE_PRIV_FAIL,
+};
+
+#define MIPS3_PG_G  0x00000001 /* Global; ignore ASID if in lo0 & lo1 */
+#define MIPS3_PG_V  0x00000002 /* Valid */
+#define MIPS3_PG_NV 0x00000000
+#define MIPS3_PG_D  0x00000004 /* Dirty */
+
+#define mips3_paddr_to_tlbpfn(x) \
+    (((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME)
+#define mips3_tlbpfn_to_paddr(x) \
+    ((unsigned long)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT)
+
+#define MIPS3_PG_SHIFT      6
+#define MIPS3_PG_FRAME      0x3fffffc0
+
+#define VPN2_MASK           0xffffe000
+#define TLB_IS_GLOBAL(x)    (((x).tlb_lo0 & MIPS3_PG_G) && ((x).tlb_lo1 & MIPS3_PG_G))
+#define TLB_VPN2(x)         ((x).tlb_hi & VPN2_MASK)
+#define TLB_ASID(x)         (ASID_MASK((x).tlb_hi))
+#define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) ? ((x).tlb_lo1 & MIPS3_PG_V) : ((x).tlb_lo0 & MIPS3_PG_V))
+
+struct kvm_mips_tlb {
+       long tlb_mask;
+       long tlb_hi;
+       long tlb_lo0;
+       long tlb_lo1;
+};
+
+#define KVM_MIPS_GUEST_TLB_SIZE     64
+struct kvm_vcpu_arch {
+       void *host_ebase, *guest_ebase;
+       unsigned long host_stack;
+       unsigned long host_gp;
+
+       /* Host CP0 registers used when handling exits from guest */
+       unsigned long host_cp0_badvaddr;
+       unsigned long host_cp0_cause;
+       unsigned long host_cp0_epc;
+       unsigned long host_cp0_entryhi;
+       uint32_t guest_inst;
+
+       /* GPRS */
+       unsigned long gprs[32];
+       unsigned long hi;
+       unsigned long lo;
+       unsigned long pc;
+
+       /* FPU State */
+       struct mips_fpu_struct fpu;
+
+       /* COP0 State */
+       struct mips_coproc *cop0;
+
+       /* Host KSEG0 address of the EI/DI offset */
+       void *kseg0_commpage;
+
+       u32 io_gpr;             /* GPR used as IO source/target */
+
+       /* Used to calibrate the virtual count register for the guest */
+       int32_t host_cp0_count;
+
+       /* Bitmask of exceptions that are pending */
+       unsigned long pending_exceptions;
+
+       /* Bitmask of pending exceptions to be cleared */
+       unsigned long pending_exceptions_clr;
+
+       unsigned long pending_load_cause;
+
+       /* Save/Restore the entryhi register when we are preempted/scheduled back in */
+       unsigned long preempt_entryhi;
+
+       /* S/W Based TLB for guest */
+       struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE];
+
+       /* Cached guest kernel/user ASIDs */
+       uint32_t guest_user_asid[NR_CPUS];
+       uint32_t guest_kernel_asid[NR_CPUS];
+       struct mm_struct guest_kernel_mm, guest_user_mm;
+
+       struct kvm_mips_tlb shadow_tlb[NR_CPUS][KVM_MIPS_GUEST_TLB_SIZE];
+
+
+       struct hrtimer comparecount_timer;
+
+       int last_sched_cpu;
+
+       /* WAIT executed */
+       int wait;
+};
+
+
+#define kvm_read_c0_guest_index(cop0)               (cop0->reg[MIPS_CP0_TLB_INDEX][0])
+#define kvm_write_c0_guest_index(cop0, val)         (cop0->reg[MIPS_CP0_TLB_INDEX][0] = val)
+#define kvm_read_c0_guest_entrylo0(cop0)            (cop0->reg[MIPS_CP0_TLB_LO0][0])
+#define kvm_read_c0_guest_entrylo1(cop0)            (cop0->reg[MIPS_CP0_TLB_LO1][0])
+#define kvm_read_c0_guest_context(cop0)             (cop0->reg[MIPS_CP0_TLB_CONTEXT][0])
+#define kvm_write_c0_guest_context(cop0, val)       (cop0->reg[MIPS_CP0_TLB_CONTEXT][0] = (val))
+#define kvm_read_c0_guest_userlocal(cop0)           (cop0->reg[MIPS_CP0_TLB_CONTEXT][2])
+#define kvm_read_c0_guest_pagemask(cop0)            (cop0->reg[MIPS_CP0_TLB_PG_MASK][0])
+#define kvm_write_c0_guest_pagemask(cop0, val)      (cop0->reg[MIPS_CP0_TLB_PG_MASK][0] = (val))
+#define kvm_read_c0_guest_wired(cop0)               (cop0->reg[MIPS_CP0_TLB_WIRED][0])
+#define kvm_write_c0_guest_wired(cop0, val)         (cop0->reg[MIPS_CP0_TLB_WIRED][0] = (val))
+#define kvm_read_c0_guest_badvaddr(cop0)            (cop0->reg[MIPS_CP0_BAD_VADDR][0])
+#define kvm_write_c0_guest_badvaddr(cop0, val)      (cop0->reg[MIPS_CP0_BAD_VADDR][0] = (val))
+#define kvm_read_c0_guest_count(cop0)               (cop0->reg[MIPS_CP0_COUNT][0])
+#define kvm_write_c0_guest_count(cop0, val)         (cop0->reg[MIPS_CP0_COUNT][0] = (val))
+#define kvm_read_c0_guest_entryhi(cop0)             (cop0->reg[MIPS_CP0_TLB_HI][0])
+#define kvm_write_c0_guest_entryhi(cop0, val)       (cop0->reg[MIPS_CP0_TLB_HI][0] = (val))
+#define kvm_read_c0_guest_compare(cop0)             (cop0->reg[MIPS_CP0_COMPARE][0])
+#define kvm_write_c0_guest_compare(cop0, val)       (cop0->reg[MIPS_CP0_COMPARE][0] = (val))
+#define kvm_read_c0_guest_status(cop0)              (cop0->reg[MIPS_CP0_STATUS][0])
+#define kvm_write_c0_guest_status(cop0, val)        (cop0->reg[MIPS_CP0_STATUS][0] = (val))
+#define kvm_read_c0_guest_intctl(cop0)              (cop0->reg[MIPS_CP0_STATUS][1])
+#define kvm_write_c0_guest_intctl(cop0, val)        (cop0->reg[MIPS_CP0_STATUS][1] = (val))
+#define kvm_read_c0_guest_cause(cop0)               (cop0->reg[MIPS_CP0_CAUSE][0])
+#define kvm_write_c0_guest_cause(cop0, val)         (cop0->reg[MIPS_CP0_CAUSE][0] = (val))
+#define kvm_read_c0_guest_epc(cop0)                 (cop0->reg[MIPS_CP0_EXC_PC][0])
+#define kvm_write_c0_guest_epc(cop0, val)           (cop0->reg[MIPS_CP0_EXC_PC][0] = (val))
+#define kvm_read_c0_guest_prid(cop0)                (cop0->reg[MIPS_CP0_PRID][0])
+#define kvm_write_c0_guest_prid(cop0, val)          (cop0->reg[MIPS_CP0_PRID][0] = (val))
+#define kvm_read_c0_guest_ebase(cop0)               (cop0->reg[MIPS_CP0_PRID][1])
+#define kvm_write_c0_guest_ebase(cop0, val)         (cop0->reg[MIPS_CP0_PRID][1] = (val))
+#define kvm_read_c0_guest_config(cop0)              (cop0->reg[MIPS_CP0_CONFIG][0])
+#define kvm_read_c0_guest_config1(cop0)             (cop0->reg[MIPS_CP0_CONFIG][1])
+#define kvm_read_c0_guest_config2(cop0)             (cop0->reg[MIPS_CP0_CONFIG][2])
+#define kvm_read_c0_guest_config3(cop0)             (cop0->reg[MIPS_CP0_CONFIG][3])
+#define kvm_read_c0_guest_config7(cop0)             (cop0->reg[MIPS_CP0_CONFIG][7])
+#define kvm_write_c0_guest_config(cop0, val)        (cop0->reg[MIPS_CP0_CONFIG][0] = (val))
+#define kvm_write_c0_guest_config1(cop0, val)       (cop0->reg[MIPS_CP0_CONFIG][1] = (val))
+#define kvm_write_c0_guest_config2(cop0, val)       (cop0->reg[MIPS_CP0_CONFIG][2] = (val))
+#define kvm_write_c0_guest_config3(cop0, val)       (cop0->reg[MIPS_CP0_CONFIG][3] = (val))
+#define kvm_write_c0_guest_config7(cop0, val)       (cop0->reg[MIPS_CP0_CONFIG][7] = (val))
+#define kvm_read_c0_guest_errorepc(cop0)            (cop0->reg[MIPS_CP0_ERROR_PC][0])
+#define kvm_write_c0_guest_errorepc(cop0, val)      (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
+
+#define kvm_set_c0_guest_status(cop0, val)          (cop0->reg[MIPS_CP0_STATUS][0] |= (val))
+#define kvm_clear_c0_guest_status(cop0, val)        (cop0->reg[MIPS_CP0_STATUS][0] &= ~(val))
+#define kvm_set_c0_guest_cause(cop0, val)           (cop0->reg[MIPS_CP0_CAUSE][0] |= (val))
+#define kvm_clear_c0_guest_cause(cop0, val)         (cop0->reg[MIPS_CP0_CAUSE][0] &= ~(val))
+#define kvm_change_c0_guest_cause(cop0, change, val)  \
+{                                                     \
+    kvm_clear_c0_guest_cause(cop0, change);           \
+    kvm_set_c0_guest_cause(cop0, ((val) & (change))); \
+}
+#define kvm_set_c0_guest_ebase(cop0, val)           (cop0->reg[MIPS_CP0_PRID][1] |= (val))
+#define kvm_clear_c0_guest_ebase(cop0, val)         (cop0->reg[MIPS_CP0_PRID][1] &= ~(val))
+#define kvm_change_c0_guest_ebase(cop0, change, val)  \
+{                                                     \
+    kvm_clear_c0_guest_ebase(cop0, change);           \
+    kvm_set_c0_guest_ebase(cop0, ((val) & (change))); \
+}
+
+
+struct kvm_mips_callbacks {
+       int (*handle_cop_unusable) (struct kvm_vcpu *vcpu);
+       int (*handle_tlb_mod) (struct kvm_vcpu *vcpu);
+       int (*handle_tlb_ld_miss) (struct kvm_vcpu *vcpu);
+       int (*handle_tlb_st_miss) (struct kvm_vcpu *vcpu);
+       int (*handle_addr_err_st) (struct kvm_vcpu *vcpu);
+       int (*handle_addr_err_ld) (struct kvm_vcpu *vcpu);
+       int (*handle_syscall) (struct kvm_vcpu *vcpu);
+       int (*handle_res_inst) (struct kvm_vcpu *vcpu);
+       int (*handle_break) (struct kvm_vcpu *vcpu);
+       int (*vm_init) (struct kvm *kvm);
+       int (*vcpu_init) (struct kvm_vcpu *vcpu);
+       int (*vcpu_setup) (struct kvm_vcpu *vcpu);
+        gpa_t(*gva_to_gpa) (gva_t gva);
+       void (*queue_timer_int) (struct kvm_vcpu *vcpu);
+       void (*dequeue_timer_int) (struct kvm_vcpu *vcpu);
+       void (*queue_io_int) (struct kvm_vcpu *vcpu,
+                             struct kvm_mips_interrupt *irq);
+       void (*dequeue_io_int) (struct kvm_vcpu *vcpu,
+                               struct kvm_mips_interrupt *irq);
+       int (*irq_deliver) (struct kvm_vcpu *vcpu, unsigned int priority,
+                           uint32_t cause);
+       int (*irq_clear) (struct kvm_vcpu *vcpu, unsigned int priority,
+                         uint32_t cause);
+       int (*vcpu_ioctl_get_regs) (struct kvm_vcpu *vcpu,
+                                   struct kvm_regs *regs);
+       int (*vcpu_ioctl_set_regs) (struct kvm_vcpu *vcpu,
+                                   struct kvm_regs *regs);
+};
+extern struct kvm_mips_callbacks *kvm_mips_callbacks;
+int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
+
+/* Debug: dump vcpu state */
+int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
+
+/* Trampoline ASM routine to start running in "Guest" context */
+extern int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
+
+/* TLB handling */
+uint32_t kvm_get_kernel_asid(struct kvm_vcpu *vcpu);
+
+uint32_t kvm_get_user_asid(struct kvm_vcpu *vcpu);
+
+uint32_t kvm_get_commpage_asid (struct kvm_vcpu *vcpu);
+
+extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badbaddr,
+                                          struct kvm_vcpu *vcpu);
+
+extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
+                                             struct kvm_vcpu *vcpu);
+
+extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
+                                               struct kvm_mips_tlb *tlb,
+                                               unsigned long *hpa0,
+                                               unsigned long *hpa1);
+
+extern enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
+                                                    uint32_t *opc,
+                                                    struct kvm_run *run,
+                                                    struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause,
+                                                   uint32_t *opc,
+                                                   struct kvm_run *run,
+                                                   struct kvm_vcpu *vcpu);
+
+extern void kvm_mips_dump_host_tlbs(void);
+extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu);
+extern void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu);
+extern void kvm_mips_flush_host_tlb(int skip_kseg0);
+extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
+extern int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index);
+
+extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu,
+                                    unsigned long entryhi);
+extern int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr);
+extern unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
+                                                  unsigned long gva);
+extern void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
+                                   struct kvm_vcpu *vcpu);
+extern void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu);
+extern void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu);
+extern void kvm_local_flush_tlb_all(void);
+extern void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu);
+extern void kvm_mips_alloc_new_mmu_context(struct kvm_vcpu *vcpu);
+extern void kvm_mips_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
+extern void kvm_mips_vcpu_put(struct kvm_vcpu *vcpu);
+
+/* Emulation */
+uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu);
+enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause);
+
+extern enum emulation_result kvm_mips_emulate_inst(unsigned long cause,
+                                                  uint32_t *opc,
+                                                  struct kvm_run *run,
+                                                  struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
+                                                     uint32_t *opc,
+                                                     struct kvm_run *run,
+                                                     struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
+                                                        uint32_t *opc,
+                                                        struct kvm_run *run,
+                                                        struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
+                                                       uint32_t *opc,
+                                                       struct kvm_run *run,
+                                                       struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
+                                                        uint32_t *opc,
+                                                        struct kvm_run *run,
+                                                        struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
+                                                       uint32_t *opc,
+                                                       struct kvm_run *run,
+                                                       struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
+                                                    uint32_t *opc,
+                                                    struct kvm_run *run,
+                                                    struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
+                                                     uint32_t *opc,
+                                                     struct kvm_run *run,
+                                                     struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_handle_ri(unsigned long cause,
+                                               uint32_t *opc,
+                                               struct kvm_run *run,
+                                               struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
+                                                    uint32_t *opc,
+                                                    struct kvm_run *run,
+                                                    struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
+                                                    uint32_t *opc,
+                                                    struct kvm_run *run,
+                                                    struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
+                                                        struct kvm_run *run);
+
+enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu);
+
+enum emulation_result kvm_mips_check_privilege(unsigned long cause,
+                                              uint32_t *opc,
+                                              struct kvm_run *run,
+                                              struct kvm_vcpu *vcpu);
+
+enum emulation_result kvm_mips_emulate_cache(uint32_t inst,
+                                            uint32_t *opc,
+                                            uint32_t cause,
+                                            struct kvm_run *run,
+                                            struct kvm_vcpu *vcpu);
+enum emulation_result kvm_mips_emulate_CP0(uint32_t inst,
+                                          uint32_t *opc,
+                                          uint32_t cause,
+                                          struct kvm_run *run,
+                                          struct kvm_vcpu *vcpu);
+enum emulation_result kvm_mips_emulate_store(uint32_t inst,
+                                            uint32_t cause,
+                                            struct kvm_run *run,
+                                            struct kvm_vcpu *vcpu);
+enum emulation_result kvm_mips_emulate_load(uint32_t inst,
+                                           uint32_t cause,
+                                           struct kvm_run *run,
+                                           struct kvm_vcpu *vcpu);
+
+/* Dynamic binary translation */
+extern int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
+                                     struct kvm_vcpu *vcpu);
+extern int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
+                                  struct kvm_vcpu *vcpu);
+extern int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc,
+                              struct kvm_vcpu *vcpu);
+extern int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc,
+                              struct kvm_vcpu *vcpu);
+
+/* Misc */
+extern void mips32_SyncICache(unsigned long addr, unsigned long size);
+extern int kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
+extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
+
+
+#endif /* __MIPS_KVM_HOST_H__ */
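
The emulation entry points declared above are intended to be driven from the guest-exit path: the exit handler reads the Cause register, fetches the trapping instruction, and calls the matching kvm_mips_emulate_*() helper. A minimal, hypothetical dispatch sketch follows; the function name is made up for illustration, only the kvm_mips_* prototypes and the architectural ExcCode values (8 = SYSCALL, 9 = BREAK, 10 = Reserved Instruction) come from existing definitions, and CAUSEB_EXCCODE is assumed from mipsregs.h.

/* Illustrative sketch only -- not part of this patch. */
static enum emulation_result dispatch_guest_exit(unsigned long cause,
						 uint32_t *opc,
						 struct kvm_run *run,
						 struct kvm_vcpu *vcpu)
{
	/* ExcCode lives in Cause[6:2]; CAUSEB_EXCCODE is the shift. */
	switch ((cause >> CAUSEB_EXCCODE) & 0x1f) {
	case 8:		/* SYSCALL */
		return kvm_mips_emulate_syscall(cause, opc, run, vcpu);
	case 9:		/* BREAK */
		return kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
	case 10:	/* Reserved Instruction */
		return kvm_mips_handle_ri(cause, opc, run, vcpu);
	default:
		/* everything else is vetted by the privilege check first */
		return kvm_mips_check_privilege(cause, opc, run, vcpu);
	}
}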
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_clk.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_clk.h
deleted file mode 100644 (file)
index 8fcf8df..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef BCM63XX_CLK_H_
-#define BCM63XX_CLK_H_
-
-struct clk {
-       void            (*set)(struct clk *, int);
-       unsigned int    rate;
-       unsigned int    usage;
-       int             id;
-};
-
-#endif /* ! BCM63XX_CLK_H_ */
index cb922b9cb0e9b1e7cc96f36149a24bd8b97e70cb..336228990808e5c4aed717f21682ebd1c85045bc 100644 (file)
 #define BCM6345_CPU_ID         0x6345
 #define BCM6348_CPU_ID         0x6348
 #define BCM6358_CPU_ID         0x6358
+#define BCM6362_CPU_ID         0x6362
 #define BCM6368_CPU_ID         0x6368
 
 void __init bcm63xx_cpu_init(void);
 u16 __bcm63xx_get_cpu_id(void);
-u16 bcm63xx_get_cpu_rev(void);
+u8 bcm63xx_get_cpu_rev(void);
 unsigned int bcm63xx_get_cpu_freq(void);
 
 #ifdef CONFIG_BCM63XX_CPU_6328
@@ -86,6 +87,20 @@ unsigned int bcm63xx_get_cpu_freq(void);
 # define BCMCPU_IS_6358()      (0)
 #endif
 
+#ifdef CONFIG_BCM63XX_CPU_6362
+# ifdef bcm63xx_get_cpu_id
+#  undef bcm63xx_get_cpu_id
+#  define bcm63xx_get_cpu_id() __bcm63xx_get_cpu_id()
+#  define BCMCPU_RUNTIME_DETECT
+# else
+#  define bcm63xx_get_cpu_id() BCM6362_CPU_ID
+# endif
+# define BCMCPU_IS_6362()      (bcm63xx_get_cpu_id() == BCM6362_CPU_ID)
+#else
+# define BCMCPU_IS_6362()      (0)
+#endif
+
+
 #ifdef CONFIG_BCM63XX_CPU_6368
 # ifdef bcm63xx_get_cpu_id
 #  undef bcm63xx_get_cpu_id
@@ -405,6 +420,62 @@ enum bcm63xx_regs_set {
 #define BCM_6358_MISC_BASE             (0xdeadbeef)
 
 
+/*
+ * 6362 register sets base address
+ */
+#define BCM_6362_DSL_LMEM_BASE         (0xdeadbeef)
+#define BCM_6362_PERF_BASE             (0xb0000000)
+#define BCM_6362_TIMER_BASE            (0xb0000040)
+#define BCM_6362_WDT_BASE              (0xb000005c)
+#define BCM_6362_UART0_BASE             (0xb0000100)
+#define BCM_6362_UART1_BASE            (0xb0000120)
+#define BCM_6362_GPIO_BASE             (0xb0000080)
+#define BCM_6362_SPI_BASE              (0xb0000800)
+#define BCM_6362_HSSPI_BASE            (0xb0001000)
+#define BCM_6362_UDC0_BASE             (0xdeadbeef)
+#define BCM_6362_USBDMA_BASE           (0xb000c000)
+#define BCM_6362_OHCI0_BASE            (0xb0002600)
+#define BCM_6362_OHCI_PRIV_BASE                (0xdeadbeef)
+#define BCM_6362_USBH_PRIV_BASE                (0xb0002700)
+#define BCM_6362_USBD_BASE             (0xb0002400)
+#define BCM_6362_MPI_BASE              (0xdeadbeef)
+#define BCM_6362_PCMCIA_BASE           (0xdeadbeef)
+#define BCM_6362_PCIE_BASE             (0xb0e40000)
+#define BCM_6362_SDRAM_REGS_BASE       (0xdeadbeef)
+#define BCM_6362_DSL_BASE              (0xdeadbeef)
+#define BCM_6362_UBUS_BASE             (0xdeadbeef)
+#define BCM_6362_ENET0_BASE            (0xdeadbeef)
+#define BCM_6362_ENET1_BASE            (0xdeadbeef)
+#define BCM_6362_ENETDMA_BASE          (0xb000d800)
+#define BCM_6362_ENETDMAC_BASE         (0xb000da00)
+#define BCM_6362_ENETDMAS_BASE         (0xb000dc00)
+#define BCM_6362_ENETSW_BASE           (0xb0e00000)
+#define BCM_6362_EHCI0_BASE            (0xb0002500)
+#define BCM_6362_SDRAM_BASE            (0xdeadbeef)
+#define BCM_6362_MEMC_BASE             (0xdeadbeef)
+#define BCM_6362_DDR_BASE              (0xb0003000)
+#define BCM_6362_M2M_BASE              (0xdeadbeef)
+#define BCM_6362_ATM_BASE              (0xdeadbeef)
+#define BCM_6362_XTM_BASE              (0xb0007800)
+#define BCM_6362_XTMDMA_BASE           (0xb000b800)
+#define BCM_6362_XTMDMAC_BASE          (0xdeadbeef)
+#define BCM_6362_XTMDMAS_BASE          (0xdeadbeef)
+#define BCM_6362_PCM_BASE              (0xb000a800)
+#define BCM_6362_PCMDMA_BASE           (0xdeadbeef)
+#define BCM_6362_PCMDMAC_BASE          (0xdeadbeef)
+#define BCM_6362_PCMDMAS_BASE          (0xdeadbeef)
+#define BCM_6362_RNG_BASE              (0xdeadbeef)
+#define BCM_6362_MISC_BASE             (0xb0001800)
+
+#define BCM_6362_NAND_REG_BASE         (0xb0000200)
+#define BCM_6362_NAND_CACHE_BASE       (0xb0000600)
+#define BCM_6362_LED_BASE              (0xb0001900)
+#define BCM_6362_IPSEC_BASE            (0xb0002800)
+#define BCM_6362_IPSEC_DMA_BASE                (0xb000d000)
+#define BCM_6362_WLAN_CHIPCOMMON_BASE  (0xb0004000)
+#define BCM_6362_WLAN_D11_BASE         (0xb0005000)
+#define BCM_6362_WLAN_SHIM_BASE                (0xb0007000)
+
 /*
  * 6368 register sets base address
  */
@@ -564,6 +635,9 @@ static inline unsigned long bcm63xx_regset_address(enum bcm63xx_regs_set set)
 #ifdef CONFIG_BCM63XX_CPU_6358
        __GEN_RSET(6358)
 #endif
+#ifdef CONFIG_BCM63XX_CPU_6362
+       __GEN_RSET(6362)
+#endif
 #ifdef CONFIG_BCM63XX_CPU_6368
        __GEN_RSET(6368)
 #endif
@@ -819,6 +893,71 @@ enum bcm63xx_irq {
 #define BCM_6358_EXT_IRQ2              (IRQ_INTERNAL_BASE + 27)
 #define BCM_6358_EXT_IRQ3              (IRQ_INTERNAL_BASE + 28)
 
+/*
+ * 6362 irqs
+ */
+#define BCM_6362_HIGH_IRQ_BASE         (IRQ_INTERNAL_BASE + 32)
+
+#define BCM_6362_TIMER_IRQ             (IRQ_INTERNAL_BASE + 0)
+#define BCM_6362_SPI_IRQ               (IRQ_INTERNAL_BASE + 2)
+#define BCM_6362_UART0_IRQ             (IRQ_INTERNAL_BASE + 3)
+#define BCM_6362_UART1_IRQ             (IRQ_INTERNAL_BASE + 4)
+#define BCM_6362_DSL_IRQ               (IRQ_INTERNAL_BASE + 28)
+#define BCM_6362_UDC0_IRQ              0
+#define BCM_6362_ENET0_IRQ             0
+#define BCM_6362_ENET1_IRQ             0
+#define BCM_6362_ENET_PHY_IRQ          (IRQ_INTERNAL_BASE + 14)
+#define BCM_6362_HSSPI_IRQ             (IRQ_INTERNAL_BASE + 5)
+#define BCM_6362_OHCI0_IRQ             (IRQ_INTERNAL_BASE + 9)
+#define BCM_6362_EHCI0_IRQ             (IRQ_INTERNAL_BASE + 10)
+#define BCM_6362_USBD_IRQ              (IRQ_INTERNAL_BASE + 11)
+#define BCM_6362_USBD_RXDMA0_IRQ       (IRQ_INTERNAL_BASE + 20)
+#define BCM_6362_USBD_TXDMA0_IRQ       (IRQ_INTERNAL_BASE + 21)
+#define BCM_6362_USBD_RXDMA1_IRQ       (IRQ_INTERNAL_BASE + 22)
+#define BCM_6362_USBD_TXDMA1_IRQ       (IRQ_INTERNAL_BASE + 23)
+#define BCM_6362_USBD_RXDMA2_IRQ       (IRQ_INTERNAL_BASE + 24)
+#define BCM_6362_USBD_TXDMA2_IRQ       (IRQ_INTERNAL_BASE + 25)
+#define BCM_6362_PCMCIA_IRQ            0
+#define BCM_6362_ENET0_RXDMA_IRQ       0
+#define BCM_6362_ENET0_TXDMA_IRQ       0
+#define BCM_6362_ENET1_RXDMA_IRQ       0
+#define BCM_6362_ENET1_TXDMA_IRQ       0
+#define BCM_6362_PCI_IRQ               (IRQ_INTERNAL_BASE + 30)
+#define BCM_6362_ATM_IRQ               0
+#define BCM_6362_ENETSW_RXDMA0_IRQ     (BCM_6362_HIGH_IRQ_BASE + 0)
+#define BCM_6362_ENETSW_RXDMA1_IRQ     (BCM_6362_HIGH_IRQ_BASE + 1)
+#define BCM_6362_ENETSW_RXDMA2_IRQ     (BCM_6362_HIGH_IRQ_BASE + 2)
+#define BCM_6362_ENETSW_RXDMA3_IRQ     (BCM_6362_HIGH_IRQ_BASE + 3)
+#define BCM_6362_ENETSW_TXDMA0_IRQ     0
+#define BCM_6362_ENETSW_TXDMA1_IRQ     0
+#define BCM_6362_ENETSW_TXDMA2_IRQ     0
+#define BCM_6362_ENETSW_TXDMA3_IRQ     0
+#define BCM_6362_XTM_IRQ               0
+#define BCM_6362_XTM_DMA0_IRQ          (BCM_6362_HIGH_IRQ_BASE + 12)
+
+#define BCM_6362_RING_OSC_IRQ          (IRQ_INTERNAL_BASE + 1)
+#define BCM_6362_WLAN_GPIO_IRQ         (IRQ_INTERNAL_BASE + 6)
+#define BCM_6362_WLAN_IRQ              (IRQ_INTERNAL_BASE + 7)
+#define BCM_6362_IPSEC_IRQ             (IRQ_INTERNAL_BASE + 8)
+#define BCM_6362_NAND_IRQ              (IRQ_INTERNAL_BASE + 12)
+#define BCM_6362_PCM_IRQ               (IRQ_INTERNAL_BASE + 13)
+#define BCM_6362_DG_IRQ                        (IRQ_INTERNAL_BASE + 15)
+#define BCM_6362_EPHY_ENERGY0_IRQ      (IRQ_INTERNAL_BASE + 16)
+#define BCM_6362_EPHY_ENERGY1_IRQ      (IRQ_INTERNAL_BASE + 17)
+#define BCM_6362_EPHY_ENERGY2_IRQ      (IRQ_INTERNAL_BASE + 18)
+#define BCM_6362_EPHY_ENERGY3_IRQ      (IRQ_INTERNAL_BASE + 19)
+#define BCM_6362_IPSEC_DMA0_IRQ                (IRQ_INTERNAL_BASE + 26)
+#define BCM_6362_IPSEC_DMA1_IRQ                (IRQ_INTERNAL_BASE + 27)
+#define BCM_6362_FAP0_IRQ              (IRQ_INTERNAL_BASE + 29)
+#define BCM_6362_PCM_DMA0_IRQ          (BCM_6362_HIGH_IRQ_BASE + 4)
+#define BCM_6362_PCM_DMA1_IRQ          (BCM_6362_HIGH_IRQ_BASE + 5)
+#define BCM_6362_DECT0_IRQ             (BCM_6362_HIGH_IRQ_BASE + 6)
+#define BCM_6362_DECT1_IRQ             (BCM_6362_HIGH_IRQ_BASE + 7)
+#define BCM_6362_EXT_IRQ0              (BCM_6362_HIGH_IRQ_BASE + 8)
+#define BCM_6362_EXT_IRQ1              (BCM_6362_HIGH_IRQ_BASE + 9)
+#define BCM_6362_EXT_IRQ2              (BCM_6362_HIGH_IRQ_BASE + 10)
+#define BCM_6362_EXT_IRQ3              (BCM_6362_HIGH_IRQ_BASE + 11)
+
 /*
  * 6368 irqs
  */
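
The new BCMCPU_IS_6362() block follows the existing bcm63xx convention: when only one CONFIG_BCM63XX_CPU_* option is enabled, bcm63xx_get_cpu_id() stays a compile-time constant and every other BCMCPU_IS_xxxx() test evaluates to (0), so the compiler drops the dead branches; when a second CPU option is enabled, the macro is re-pointed at __bcm63xx_get_cpu_id() and BCMCPU_RUNTIME_DETECT forces a runtime compare. A short hedged illustration (the helper function is made up for the example):

/* Illustrative only: how the CPU-ID macros above behave. */
static void bcm6362_quirk(void)		/* hypothetical helper */
{
	if (BCMCPU_IS_6362()) {
		/* Single-SoC 6362 build: the condition folds to (1) and
		 * the branch is always taken; other BCMCPU_IS_xxxx()
		 * bodies compile out as (0).
		 * Multi-SoC build: this becomes a runtime compare of
		 * __bcm63xx_get_cpu_id() against BCM6362_CPU_ID. */
	}
}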
index b0184cf025755a49d5fc7ca6614e82c9866b4fd8..c426cabc620a1df26338693f77e0485e07402cb1 100644 (file)
@@ -71,18 +71,13 @@ static inline unsigned long bcm63xx_spireg(enum bcm63xx_regs_spi reg)
 
        return bcm63xx_regs_spi[reg];
 #else
-#ifdef CONFIG_BCM63XX_CPU_6338
-       __GEN_SPI_RSET(6338)
-#endif
-#ifdef CONFIG_BCM63XX_CPU_6348
+#if defined(CONFIG_BCM63XX_CPU_6338) || defined(CONFIG_BCM63XX_CPU_6348)
        __GEN_SPI_RSET(6348)
 #endif
-#ifdef CONFIG_BCM63XX_CPU_6358
+#if defined(CONFIG_BCM63XX_CPU_6358) || defined(CONFIG_BCM63XX_CPU_6362) || \
+       defined(CONFIG_BCM63XX_CPU_6368)
        __GEN_SPI_RSET(6358)
 #endif
-#ifdef CONFIG_BCM63XX_CPU_6368
-       __GEN_SPI_RSET(6368)
-#endif
 #endif
        return 0;
 }
index 0a9891f7580de557e702e7c4708139705b27605e..35baa1a60a64547a7594aa30e91ee7fedcc1f101 100644 (file)
@@ -17,6 +17,8 @@ static inline unsigned long bcm63xx_gpio_count(void)
                return 8;
        case BCM6345_CPU_ID:
                return 16;
+       case BCM6362_CPU_ID:
+               return 48;
        case BCM6368_CPU_ID:
                return 38;
        case BCM6348_CPU_ID:
index 81b4702f792a81693dbfc49db980276296967cff..3203fe49b34d4d55e6808c0c50f4d55a4f9f91b0 100644 (file)
@@ -10,7 +10,7 @@
 #define REV_CHIPID_SHIFT               16
 #define REV_CHIPID_MASK                        (0xffff << REV_CHIPID_SHIFT)
 #define REV_REVID_SHIFT                        0
-#define REV_REVID_MASK                 (0xffff << REV_REVID_SHIFT)
+#define REV_REVID_MASK                 (0xff << REV_REVID_SHIFT)
 
 /* Clock Control register */
 #define PERF_CKCTL_REG                 0x4
                                        CKCTL_6358_USBSU_EN |           \
                                        CKCTL_6358_EPHY_EN)
 
+#define CKCTL_6362_ADSL_QPROC_EN       (1 << 1)
+#define CKCTL_6362_ADSL_AFE_EN         (1 << 2)
+#define CKCTL_6362_ADSL_EN             (1 << 3)
+#define CKCTL_6362_MIPS_EN             (1 << 4)
+#define CKCTL_6362_WLAN_OCP_EN         (1 << 5)
+#define CKCTL_6362_SWPKT_USB_EN                (1 << 7)
+#define CKCTL_6362_SWPKT_SAR_EN                (1 << 8)
+#define CKCTL_6362_SAR_EN              (1 << 9)
+#define CKCTL_6362_ROBOSW_EN           (1 << 10)
+#define CKCTL_6362_PCM_EN              (1 << 11)
+#define CKCTL_6362_USBD_EN             (1 << 12)
+#define CKCTL_6362_USBH_EN             (1 << 13)
+#define CKCTL_6362_IPSEC_EN            (1 << 14)
+#define CKCTL_6362_SPI_EN              (1 << 15)
+#define CKCTL_6362_HSSPI_EN            (1 << 16)
+#define CKCTL_6362_PCIE_EN             (1 << 17)
+#define CKCTL_6362_FAP_EN              (1 << 18)
+#define CKCTL_6362_PHYMIPS_EN          (1 << 19)
+#define CKCTL_6362_NAND_EN             (1 << 20)
+
+#define CKCTL_6362_ALL_SAFE_EN         (CKCTL_6362_PHYMIPS_EN |        \
+                                       CKCTL_6362_ADSL_QPROC_EN |      \
+                                       CKCTL_6362_ADSL_AFE_EN |        \
+                                       CKCTL_6362_ADSL_EN |            \
+                                       CKCTL_6362_SAR_EN  |            \
+                                       CKCTL_6362_PCM_EN  |            \
+                                       CKCTL_6362_IPSEC_EN |           \
+                                       CKCTL_6362_USBD_EN |            \
+                                       CKCTL_6362_USBH_EN |            \
+                                       CKCTL_6362_ROBOSW_EN |          \
+                                       CKCTL_6362_PCIE_EN)
+
+
 #define CKCTL_6368_VDSL_QPROC_EN       (1 << 2)
 #define CKCTL_6368_VDSL_AFE_EN         (1 << 3)
 #define CKCTL_6368_VDSL_BONDING_EN     (1 << 4)
 #define PERF_IRQMASK_6345_REG          0xc
 #define PERF_IRQMASK_6348_REG          0xc
 #define PERF_IRQMASK_6358_REG          0xc
+#define PERF_IRQMASK_6362_REG          0x20
 #define PERF_IRQMASK_6368_REG          0x20
 
 /* Interrupt Status register */
 #define PERF_IRQSTAT_6345_REG          0x10
 #define PERF_IRQSTAT_6348_REG          0x10
 #define PERF_IRQSTAT_6358_REG          0x10
+#define PERF_IRQSTAT_6362_REG          0x28
 #define PERF_IRQSTAT_6368_REG          0x28
 
 /* External Interrupt Configuration register */
 #define PERF_EXTIRQ_CFG_REG_6345       0x14
 #define PERF_EXTIRQ_CFG_REG_6348       0x14
 #define PERF_EXTIRQ_CFG_REG_6358       0x14
+#define PERF_EXTIRQ_CFG_REG_6362       0x18
 #define PERF_EXTIRQ_CFG_REG_6368       0x18
 
 #define PERF_EXTIRQ_CFG_REG2_6368      0x1c
 #define PERF_SOFTRESET_REG             0x28
 #define PERF_SOFTRESET_6328_REG                0x10
 #define PERF_SOFTRESET_6358_REG                0x34
+#define PERF_SOFTRESET_6362_REG                0x10
 #define PERF_SOFTRESET_6368_REG                0x10
 
 #define SOFTRESET_6328_SPI_MASK                (1 << 0)
 #define SOFTRESET_6358_PCM_MASK                (1 << 13)
 #define SOFTRESET_6358_ADSL_MASK       (1 << 14)
 
+#define SOFTRESET_6362_SPI_MASK                (1 << 0)
+#define SOFTRESET_6362_IPSEC_MASK      (1 << 1)
+#define SOFTRESET_6362_EPHY_MASK       (1 << 2)
+#define SOFTRESET_6362_SAR_MASK                (1 << 3)
+#define SOFTRESET_6362_ENETSW_MASK     (1 << 4)
+#define SOFTRESET_6362_USBS_MASK       (1 << 5)
+#define SOFTRESET_6362_USBH_MASK       (1 << 6)
+#define SOFTRESET_6362_PCM_MASK                (1 << 7)
+#define SOFTRESET_6362_PCIE_CORE_MASK  (1 << 8)
+#define SOFTRESET_6362_PCIE_MASK       (1 << 9)
+#define SOFTRESET_6362_PCIE_EXT_MASK   (1 << 10)
+#define SOFTRESET_6362_WLAN_SHIM_MASK  (1 << 11)
+#define SOFTRESET_6362_DDR_PHY_MASK    (1 << 12)
+#define SOFTRESET_6362_FAP_MASK                (1 << 13)
+#define SOFTRESET_6362_WLAN_UBUS_MASK  (1 << 14)
+
 #define SOFTRESET_6368_SPI_MASK                (1 << 0)
 #define SOFTRESET_6368_MPI_MASK                (1 << 3)
 #define SOFTRESET_6368_EPHY_MASK       (1 << 6)
  * _REG relative to RSET_SPI
  *************************************************************************/
 
-/* BCM 6338 SPI core */
-#define SPI_6338_CMD                   0x00    /* 16-bits register */
-#define SPI_6338_INT_STATUS            0x02
-#define SPI_6338_INT_MASK_ST           0x03
-#define SPI_6338_INT_MASK              0x04
-#define SPI_6338_ST                    0x05
-#define SPI_6338_CLK_CFG               0x06
-#define SPI_6338_FILL_BYTE             0x07
-#define SPI_6338_MSG_TAIL              0x09
-#define SPI_6338_RX_TAIL               0x0b
-#define SPI_6338_MSG_CTL               0x40    /* 8-bits register */
-#define SPI_6338_MSG_CTL_WIDTH         8
-#define SPI_6338_MSG_DATA              0x41
-#define SPI_6338_MSG_DATA_SIZE         0x3f
-#define SPI_6338_RX_DATA               0x80
-#define SPI_6338_RX_DATA_SIZE          0x3f
-
-/* BCM 6348 SPI core */
+/* BCM 6338/6348 SPI core */
 #define SPI_6348_CMD                   0x00    /* 16-bits register */
 #define SPI_6348_INT_STATUS            0x02
 #define SPI_6348_INT_MASK_ST           0x03
 #define SPI_6348_RX_DATA               0x80
 #define SPI_6348_RX_DATA_SIZE          0x3f
 
-/* BCM 6358 SPI core */
+/* BCM 6358/6362/6368 SPI core */
 #define SPI_6358_MSG_CTL               0x00    /* 16-bits register */
 #define SPI_6358_MSG_CTL_WIDTH         16
 #define SPI_6358_MSG_DATA              0x02
 #define SPI_6358_MSG_TAIL              0x709
 #define SPI_6358_RX_TAIL               0x70B
 
-/* BCM 6358 SPI core */
-#define SPI_6368_MSG_CTL               0x00    /* 16-bits register */
-#define SPI_6368_MSG_CTL_WIDTH         16
-#define SPI_6368_MSG_DATA              0x02
-#define SPI_6368_MSG_DATA_SIZE         0x21e
-#define SPI_6368_RX_DATA               0x400
-#define SPI_6368_RX_DATA_SIZE          0x220
-#define SPI_6368_CMD                   0x700   /* 16-bits register */
-#define SPI_6368_INT_STATUS            0x702
-#define SPI_6368_INT_MASK_ST           0x703
-#define SPI_6368_INT_MASK              0x704
-#define SPI_6368_ST                    0x705
-#define SPI_6368_CLK_CFG               0x706
-#define SPI_6368_FILL_BYTE             0x707
-#define SPI_6368_MSG_TAIL              0x709
-#define SPI_6368_RX_TAIL               0x70B
-
 /* Shared SPI definitions */
 
 /* Message configuration */
 #define SPI_HD_W                       0x01
 #define SPI_HD_R                       0x02
 #define SPI_BYTE_CNT_SHIFT             0
-#define SPI_6338_MSG_TYPE_SHIFT                6
 #define SPI_6348_MSG_TYPE_SHIFT                6
 #define SPI_6358_MSG_TYPE_SHIFT                14
-#define SPI_6368_MSG_TYPE_SHIFT                14
 
 /* Command */
 #define SPI_CMD_NOOP                   0x00
 /*************************************************************************
  * _REG relative to RSET_MISC
  *************************************************************************/
-#define MISC_SERDES_CTRL_REG           0x0
+#define MISC_SERDES_CTRL_6328_REG      0x0
+#define MISC_SERDES_CTRL_6362_REG      0x4
 #define SERDES_PCIE_EN                 (1 << 0)
 #define SERDES_PCIE_EXD_EN             (1 << 15)
 
+#define MISC_STRAPBUS_6362_REG         0x14
+#define STRAPBUS_6362_FCVO_SHIFT       1
+#define STRAPBUS_6362_HSSPI_CLK_FAST   (1 << 13)
+#define STRAPBUS_6362_FCVO_MASK                (0x1f << STRAPBUS_6362_FCVO_SHIFT)
+#define STRAPBUS_6362_BOOT_SEL_SERIAL  (1 << 15)
+#define STRAPBUS_6362_BOOT_SEL_NAND    (0 << 15)
+
 #define MISC_STRAPBUS_6328_REG         0x240
 #define STRAPBUS_6328_FCVO_SHIFT       7
 #define STRAPBUS_6328_FCVO_MASK                (0x1f << STRAPBUS_6328_FCVO_SHIFT)
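
The new MISC_STRAPBUS_6362_REG fields are meant to be consumed the same way as the existing 6328 strap definitions: read the strap register from the MISC block, mask out the FCVO code, and let the clock setup translate it to a frequency. A sketch of the read side, assuming the bcm_misc_readl() accessor used elsewhere in the bcm63xx code; the helper name is illustrative and no FCVO-to-MHz mapping is implied here:

/* Sketch, not from this patch: decode the 6362 strap options. */
static unsigned int bcm6362_strap_info(void)
{
	u32 strap = bcm_misc_readl(MISC_STRAPBUS_6362_REG);
	unsigned int fcvo = (strap & STRAPBUS_6362_FCVO_MASK) >>
			    STRAPBUS_6362_FCVO_SHIFT;

	if (strap & STRAPBUS_6362_BOOT_SEL_SERIAL)
		pr_info("booted from serial flash\n");
	else
		pr_info("booted from NAND\n");

	return fcvo;	/* 5-bit frequency code, decoded by clock setup */
}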
index 30931c42379d95841ff667bc2036e1f4939dfe51..94e3011ba7df99b55e5c6657e218a5cccc25e1e4 100644 (file)
@@ -19,6 +19,7 @@ static inline int is_bcm63xx_internal_registers(phys_t offset)
                        return 1;
                break;
        case BCM6328_CPU_ID:
+       case BCM6362_CPU_ID:
        case BCM6368_CPU_ID:
                if (offset >= 0xb0000000 && offset < 0xb1000000)
                        return 1;
index 9c95177f7a7e04bde7771cf5b7c8204753a57e9b..fe23034aaf721497af158ee2deec840038478dcf 100644 (file)
@@ -61,9 +61,8 @@ static inline int plat_device_is_coherent(struct device *dev)
 {
 #ifdef CONFIG_DMA_COHERENT
        return 1;
-#endif
-#ifdef CONFIG_DMA_NONCOHERENT
-       return 0;
+#else
+       return coherentio;
 #endif
 }
 
index 73d717a75cb0a1038e2f10009d67e657e23ab429..5b2f2e68e57f08210be7d4a98370cb3895111adf 100644 (file)
 #endif
 
 #ifdef CONFIG_32BIT
-
+#ifdef CONFIG_KVM_GUEST
+#define CAC_BASE               _AC(0x40000000, UL)
+#else
 #define CAC_BASE               _AC(0x80000000, UL)
+#endif
 #define IO_BASE                        _AC(0xa0000000, UL)
 #define UNCAC_BASE             _AC(0xa0000000, UL)
 
 #ifndef MAP_BASE
+#ifdef CONFIG_KVM_GUEST
+#define MAP_BASE               _AC(0x60000000, UL)
+#else
 #define MAP_BASE               _AC(0xc0000000, UL)
 #endif
+#endif
 
 /*
  * Memory above this physical address will be considered highmem.
index 75fd8c0f986eaff51a02ce2807a5c92442e657be..c0f3ef45c2c14f0e658feee183eabc6f8141ea9f 100644 (file)
@@ -57,5 +57,6 @@
 #define cpu_has_vint           0
 #define cpu_has_vtag_icache    0
 #define cpu_has_watch          1
+#define cpu_has_local_ebase    0
 
 #endif /* __ASM_MACH_LOONGSON_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-ralink/mt7620.h b/arch/mips/include/asm/mach-ralink/mt7620.h
new file mode 100644 (file)
index 0000000..9809972
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#ifndef _MT7620_REGS_H_
+#define _MT7620_REGS_H_
+
+#define MT7620_SYSC_BASE               0x10000000
+
+#define SYSC_REG_CHIP_NAME0            0x00
+#define SYSC_REG_CHIP_NAME1            0x04
+#define SYSC_REG_CHIP_REV              0x0c
+#define SYSC_REG_SYSTEM_CONFIG0                0x10
+#define SYSC_REG_SYSTEM_CONFIG1                0x14
+#define SYSC_REG_CPLL_CONFIG0          0x54
+#define SYSC_REG_CPLL_CONFIG1          0x58
+
+#define MT7620N_CHIP_NAME0             0x33365452
+#define MT7620N_CHIP_NAME1             0x20203235
+
+#define MT7620A_CHIP_NAME0             0x3637544d
+#define MT7620A_CHIP_NAME1             0x20203032
+
+#define CHIP_REV_PKG_MASK              0x1
+#define CHIP_REV_PKG_SHIFT             16
+#define CHIP_REV_VER_MASK              0xf
+#define CHIP_REV_VER_SHIFT             8
+#define CHIP_REV_ECO_MASK              0xf
+
+#define CPLL_SW_CONFIG_SHIFT           31
+#define CPLL_SW_CONFIG_MASK            0x1
+#define CPLL_CPU_CLK_SHIFT             24
+#define CPLL_CPU_CLK_MASK              0x1
+#define CPLL_MULT_RATIO_SHIFT           16
+#define CPLL_MULT_RATIO                 0x7
+#define CPLL_DIV_RATIO_SHIFT            10
+#define CPLL_DIV_RATIO                  0x3
+
+#define SYSCFG0_DRAM_TYPE_MASK         0x3
+#define SYSCFG0_DRAM_TYPE_SHIFT                4
+#define SYSCFG0_DRAM_TYPE_SDRAM                0
+#define SYSCFG0_DRAM_TYPE_DDR1         1
+#define SYSCFG0_DRAM_TYPE_DDR2         2
+
+#define MT7620_DRAM_BASE               0x0
+#define MT7620_SDRAM_SIZE_MIN          2
+#define MT7620_SDRAM_SIZE_MAX          64
+#define MT7620_DDR1_SIZE_MIN           32
+#define MT7620_DDR1_SIZE_MAX           128
+#define MT7620_DDR2_SIZE_MIN           32
+#define MT7620_DDR2_SIZE_MAX           256
+
+#define MT7620_GPIO_MODE_I2C           BIT(0)
+#define MT7620_GPIO_MODE_UART0_SHIFT   2
+#define MT7620_GPIO_MODE_UART0_MASK    0x7
+#define MT7620_GPIO_MODE_UART0(x)      ((x) << MT7620_GPIO_MODE_UART0_SHIFT)
+#define MT7620_GPIO_MODE_UARTF         0x0
+#define MT7620_GPIO_MODE_PCM_UARTF     0x1
+#define MT7620_GPIO_MODE_PCM_I2S       0x2
+#define MT7620_GPIO_MODE_I2S_UARTF     0x3
+#define MT7620_GPIO_MODE_PCM_GPIO      0x4
+#define MT7620_GPIO_MODE_GPIO_UARTF    0x5
+#define MT7620_GPIO_MODE_GPIO_I2S      0x6
+#define MT7620_GPIO_MODE_GPIO          0x7
+#define MT7620_GPIO_MODE_UART1         BIT(5)
+#define MT7620_GPIO_MODE_MDIO          BIT(8)
+#define MT7620_GPIO_MODE_RGMII1                BIT(9)
+#define MT7620_GPIO_MODE_RGMII2                BIT(10)
+#define MT7620_GPIO_MODE_SPI           BIT(11)
+#define MT7620_GPIO_MODE_SPI_REF_CLK   BIT(12)
+#define MT7620_GPIO_MODE_WLED          BIT(13)
+#define MT7620_GPIO_MODE_JTAG          BIT(15)
+#define MT7620_GPIO_MODE_EPHY          BIT(15)
+#define MT7620_GPIO_MODE_WDT           BIT(22)
+
+#endif
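
SYSC_REG_SYSTEM_CONFIG0 carries the DRAM type strap in a two-bit field, and the MIN/MAX constants above bound the memory probe for each type. A hedged sketch of how the field could be decoded; rt_sysc_r32() is assumed from the Ralink register accessors rather than shown in this patch, and the helper name is illustrative:

/* Illustrative sketch: map the SYSCFG0 DRAM-type strap to size limits. */
static void mt7620_dram_limits(unsigned long *min_mb, unsigned long *max_mb)
{
	u32 cfg = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG0);
	u32 type = (cfg >> SYSCFG0_DRAM_TYPE_SHIFT) & SYSCFG0_DRAM_TYPE_MASK;

	switch (type) {
	case SYSCFG0_DRAM_TYPE_SDRAM:
		*min_mb = MT7620_SDRAM_SIZE_MIN;
		*max_mb = MT7620_SDRAM_SIZE_MAX;
		break;
	case SYSCFG0_DRAM_TYPE_DDR1:
		*min_mb = MT7620_DDR1_SIZE_MIN;
		*max_mb = MT7620_DDR1_SIZE_MAX;
		break;
	default:	/* SYSCFG0_DRAM_TYPE_DDR2 and reserved values */
		*min_mb = MT7620_DDR2_SIZE_MIN;
		*max_mb = MT7620_DDR2_SIZE_MAX;
		break;
	}
}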
diff --git a/arch/mips/include/asm/mach-ralink/rt288x.h b/arch/mips/include/asm/mach-ralink/rt288x.h
new file mode 100644 (file)
index 0000000..03ad716
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#ifndef _RT288X_REGS_H_
+#define _RT288X_REGS_H_
+
+#define RT2880_SYSC_BASE               0x00300000
+
+#define SYSC_REG_CHIP_NAME0            0x00
+#define SYSC_REG_CHIP_NAME1            0x04
+#define SYSC_REG_CHIP_ID               0x0c
+#define SYSC_REG_SYSTEM_CONFIG         0x10
+#define SYSC_REG_CLKCFG                        0x30
+
+#define RT2880_CHIP_NAME0              0x38325452
+#define RT2880_CHIP_NAME1              0x20203038
+
+#define CHIP_ID_ID_MASK                        0xff
+#define CHIP_ID_ID_SHIFT               8
+#define CHIP_ID_REV_MASK               0xff
+
+#define SYSTEM_CONFIG_CPUCLK_SHIFT     20
+#define SYSTEM_CONFIG_CPUCLK_MASK      0x3
+#define SYSTEM_CONFIG_CPUCLK_250       0x0
+#define SYSTEM_CONFIG_CPUCLK_266       0x1
+#define SYSTEM_CONFIG_CPUCLK_280       0x2
+#define SYSTEM_CONFIG_CPUCLK_300       0x3
+
+#define RT2880_GPIO_MODE_I2C           BIT(0)
+#define RT2880_GPIO_MODE_UART0         BIT(1)
+#define RT2880_GPIO_MODE_SPI           BIT(2)
+#define RT2880_GPIO_MODE_UART1         BIT(3)
+#define RT2880_GPIO_MODE_JTAG          BIT(4)
+#define RT2880_GPIO_MODE_MDIO          BIT(5)
+#define RT2880_GPIO_MODE_SDRAM         BIT(6)
+#define RT2880_GPIO_MODE_PCI           BIT(7)
+
+#define CLKCFG_SRAM_CS_N_WDT           BIT(9)
+
+#define RT2880_SDRAM_BASE              0x08000000
+#define RT2880_MEM_SIZE_MIN            2
+#define RT2880_MEM_SIZE_MAX            128
+
+#endif
diff --git a/arch/mips/include/asm/mach-ralink/rt288x/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ralink/rt288x/cpu-feature-overrides.h
new file mode 100644 (file)
index 0000000..72fc106
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ * Ralink RT288x specific CPU feature overrides
+ *
+ * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * This file was derived from: include/asm-mips/cpu-features.h
+ *     Copyright (C) 2003, 2004 Ralf Baechle
+ *     Copyright (C) 2004 Maciej W. Rozycki
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+#ifndef _RT288X_CPU_FEATURE_OVERRIDES_H
+#define _RT288X_CPU_FEATURE_OVERRIDES_H
+
+#define cpu_has_tlb            1
+#define cpu_has_4kex           1
+#define cpu_has_3k_cache       0
+#define cpu_has_4k_cache       1
+#define cpu_has_tx39_cache     0
+#define cpu_has_sb1_cache      0
+#define cpu_has_fpu            0
+#define cpu_has_32fpr          0
+#define cpu_has_counter                1
+#define cpu_has_watch          1
+#define cpu_has_divec          1
+
+#define cpu_has_prefetch       1
+#define cpu_has_ejtag          1
+#define cpu_has_llsc           1
+
+#define cpu_has_mips16         1
+#define cpu_has_mdmx           0
+#define cpu_has_mips3d         0
+#define cpu_has_smartmips      0
+
+#define cpu_has_mips32r1       1
+#define cpu_has_mips32r2       1
+#define cpu_has_mips64r1       0
+#define cpu_has_mips64r2       0
+
+#define cpu_has_dsp            0
+#define cpu_has_mipsmt         0
+
+#define cpu_has_64bits         0
+#define cpu_has_64bit_zero_reg 0
+#define cpu_has_64bit_gp_regs  0
+#define cpu_has_64bit_addresses        0
+
+#define cpu_dcache_line_size() 16
+#define cpu_icache_line_size() 16
+
+#endif /* _RT288X_CPU_FEATURE_OVERRIDES_H */
index 7d344f2d7d0addb88148b1be703aa25a9c676452..069bf37a6010635ec026b0eae0bcb1766813460f 100644 (file)
@@ -97,6 +97,14 @@ static inline int soc_is_rt5350(void)
 #define RT5350_SYSCFG0_CPUCLK_320      0x2
 #define RT5350_SYSCFG0_CPUCLK_300      0x3
 
+#define RT5350_SYSCFG0_DRAM_SIZE_SHIFT  12
+#define RT5350_SYSCFG0_DRAM_SIZE_MASK   7
+#define RT5350_SYSCFG0_DRAM_SIZE_2M     0
+#define RT5350_SYSCFG0_DRAM_SIZE_8M     1
+#define RT5350_SYSCFG0_DRAM_SIZE_16M    2
+#define RT5350_SYSCFG0_DRAM_SIZE_32M    3
+#define RT5350_SYSCFG0_DRAM_SIZE_64M    4
+
 /* multi function gpio pins */
 #define RT305X_GPIO_I2C_SD             1
 #define RT305X_GPIO_I2C_SCLK           2
@@ -136,4 +144,23 @@ static inline int soc_is_rt5350(void)
 #define RT305X_GPIO_MODE_SDRAM         BIT(8)
 #define RT305X_GPIO_MODE_RGMII         BIT(9)
 
+#define RT3352_SYSC_REG_SYSCFG0                0x010
+#define RT3352_SYSC_REG_SYSCFG1         0x014
+#define RT3352_SYSC_REG_CLKCFG1         0x030
+#define RT3352_SYSC_REG_RSTCTRL         0x034
+#define RT3352_SYSC_REG_USB_PS          0x05c
+
+#define RT3352_CLKCFG0_XTAL_SEL                BIT(20)
+#define RT3352_CLKCFG1_UPHY0_CLK_EN    BIT(18)
+#define RT3352_CLKCFG1_UPHY1_CLK_EN    BIT(20)
+#define RT3352_RSTCTRL_UHST            BIT(22)
+#define RT3352_RSTCTRL_UDEV            BIT(25)
+#define RT3352_SYSCFG1_USB0_HOST_MODE  BIT(10)
+
+#define RT305X_SDRAM_BASE              0x00000000
+#define RT305X_MEM_SIZE_MIN            2
+#define RT305X_MEM_SIZE_MAX            64
+#define RT3352_MEM_SIZE_MIN            2
+#define RT3352_MEM_SIZE_MAX            256
+
 #endif
diff --git a/arch/mips/include/asm/mach-ralink/rt305x/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ralink/rt305x/cpu-feature-overrides.h
new file mode 100644 (file)
index 0000000..917c286
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ * Ralink RT305x specific CPU feature overrides
+ *
+ * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * This file was derived from: include/asm-mips/cpu-features.h
+ *     Copyright (C) 2003, 2004 Ralf Baechle
+ *     Copyright (C) 2004 Maciej W. Rozycki
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+#ifndef _RT305X_CPU_FEATURE_OVERRIDES_H
+#define _RT305X_CPU_FEATURE_OVERRIDES_H
+
+#define cpu_has_tlb            1
+#define cpu_has_4kex           1
+#define cpu_has_3k_cache       0
+#define cpu_has_4k_cache       1
+#define cpu_has_tx39_cache     0
+#define cpu_has_sb1_cache      0
+#define cpu_has_fpu            0
+#define cpu_has_32fpr          0
+#define cpu_has_counter                1
+#define cpu_has_watch          1
+#define cpu_has_divec          1
+
+#define cpu_has_prefetch       1
+#define cpu_has_ejtag          1
+#define cpu_has_llsc           1
+
+#define cpu_has_mips16         1
+#define cpu_has_mdmx           0
+#define cpu_has_mips3d         0
+#define cpu_has_smartmips      0
+
+#define cpu_has_mips32r1       1
+#define cpu_has_mips32r2       1
+#define cpu_has_mips64r1       0
+#define cpu_has_mips64r2       0
+
+#define cpu_has_dsp            1
+#define cpu_has_mipsmt         0
+
+#define cpu_has_64bits         0
+#define cpu_has_64bit_zero_reg 0
+#define cpu_has_64bit_gp_regs  0
+#define cpu_has_64bit_addresses        0
+
+#define cpu_dcache_line_size() 32
+#define cpu_icache_line_size() 32
+
+#endif /* _RT305X_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-ralink/rt3883.h b/arch/mips/include/asm/mach-ralink/rt3883.h
new file mode 100644 (file)
index 0000000..058382f
--- /dev/null
@@ -0,0 +1,252 @@
+/*
+ * Ralink RT3662/RT3883 SoC register definitions
+ *
+ * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef _RT3883_REGS_H_
+#define _RT3883_REGS_H_
+
+#include <linux/bitops.h>
+
+#define RT3883_SDRAM_BASE      0x00000000
+#define RT3883_SYSC_BASE       0x10000000
+#define RT3883_TIMER_BASE      0x10000100
+#define RT3883_INTC_BASE       0x10000200
+#define RT3883_MEMC_BASE       0x10000300
+#define RT3883_UART0_BASE      0x10000500
+#define RT3883_PIO_BASE                0x10000600
+#define RT3883_FSCC_BASE       0x10000700
+#define RT3883_NANDC_BASE      0x10000810
+#define RT3883_I2C_BASE                0x10000900
+#define RT3883_I2S_BASE                0x10000a00
+#define RT3883_SPI_BASE                0x10000b00
+#define RT3883_UART1_BASE      0x10000c00
+#define RT3883_PCM_BASE                0x10002000
+#define RT3883_GDMA_BASE       0x10002800
+#define RT3883_CODEC1_BASE     0x10003000
+#define RT3883_CODEC2_BASE     0x10003800
+#define RT3883_FE_BASE         0x10100000
+#define RT3883_ROM_BASE                0x10118000
+#define RT3883_USBDEV_BASE     0x10112000
+#define RT3883_PCI_BASE                0x10140000
+#define RT3883_WLAN_BASE       0x10180000
+#define RT3883_USBHOST_BASE    0x101c0000
+#define RT3883_BOOT_BASE       0x1c000000
+#define RT3883_SRAM_BASE       0x1e000000
+#define RT3883_PCIMEM_BASE     0x20000000
+
+#define RT3883_EHCI_BASE       (RT3883_USBHOST_BASE)
+#define RT3883_OHCI_BASE       (RT3883_USBHOST_BASE + 0x1000)
+
+#define RT3883_SYSC_SIZE       0x100
+#define RT3883_TIMER_SIZE      0x100
+#define RT3883_INTC_SIZE       0x100
+#define RT3883_MEMC_SIZE       0x100
+#define RT3883_UART0_SIZE      0x100
+#define RT3883_UART1_SIZE      0x100
+#define RT3883_PIO_SIZE                0x100
+#define RT3883_FSCC_SIZE       0x100
+#define RT3883_NANDC_SIZE      0x0f0
+#define RT3883_I2C_SIZE                0x100
+#define RT3883_I2S_SIZE                0x100
+#define RT3883_SPI_SIZE                0x100
+#define RT3883_PCM_SIZE                0x800
+#define RT3883_GDMA_SIZE       0x800
+#define RT3883_CODEC1_SIZE     0x800
+#define RT3883_CODEC2_SIZE     0x800
+#define RT3883_FE_SIZE         0x10000
+#define RT3883_ROM_SIZE                0x4000
+#define RT3883_USBDEV_SIZE     0x4000
+#define RT3883_PCI_SIZE                0x40000
+#define RT3883_WLAN_SIZE       0x40000
+#define RT3883_USBHOST_SIZE    0x40000
+#define RT3883_BOOT_SIZE       (32 * 1024 * 1024)
+#define RT3883_SRAM_SIZE       (32 * 1024 * 1024)
+
+/* SYSC registers */
+#define RT3883_SYSC_REG_CHIPID0_3      0x00    /* Chip ID 0 */
+#define RT3883_SYSC_REG_CHIPID4_7      0x04    /* Chip ID 1 */
+#define RT3883_SYSC_REG_REVID          0x0c    /* Chip Revision Identification */
+#define RT3883_SYSC_REG_SYSCFG0                0x10    /* System Configuration 0 */
+#define RT3883_SYSC_REG_SYSCFG1                0x14    /* System Configuration 1 */
+#define RT3883_SYSC_REG_CLKCFG0                0x2c    /* Clock Configuration 0 */
+#define RT3883_SYSC_REG_CLKCFG1                0x30    /* Clock Configuration 1 */
+#define RT3883_SYSC_REG_RSTCTRL                0x34    /* Reset Control*/
+#define RT3883_SYSC_REG_RSTSTAT                0x38    /* Reset Status*/
+#define RT3883_SYSC_REG_USB_PS         0x5c    /* USB Power saving control */
+#define RT3883_SYSC_REG_GPIO_MODE      0x60    /* GPIO Purpose Select */
+#define RT3883_SYSC_REG_PCIE_CLK_GEN0  0x7c
+#define RT3883_SYSC_REG_PCIE_CLK_GEN1  0x80
+#define RT3883_SYSC_REG_PCIE_CLK_GEN2  0x84
+#define RT3883_SYSC_REG_PMU            0x88
+#define RT3883_SYSC_REG_PMU1           0x8c
+
+#define RT3883_CHIP_NAME0              0x38335452
+#define RT3883_CHIP_NAME1              0x20203338
+
+#define RT3883_REVID_VER_ID_MASK       0x0f
+#define RT3883_REVID_VER_ID_SHIFT      8
+#define RT3883_REVID_ECO_ID_MASK       0x0f
+
+#define RT3883_SYSCFG0_DRAM_TYPE_DDR2  BIT(17)
+#define RT3883_SYSCFG0_CPUCLK_SHIFT    8
+#define RT3883_SYSCFG0_CPUCLK_MASK     0x3
+#define RT3883_SYSCFG0_CPUCLK_250      0x0
+#define RT3883_SYSCFG0_CPUCLK_384      0x1
+#define RT3883_SYSCFG0_CPUCLK_480      0x2
+#define RT3883_SYSCFG0_CPUCLK_500      0x3
+
+#define RT3883_SYSCFG1_USB0_HOST_MODE  BIT(10)
+#define RT3883_SYSCFG1_PCIE_RC_MODE    BIT(8)
+#define RT3883_SYSCFG1_PCI_HOST_MODE   BIT(7)
+#define RT3883_SYSCFG1_PCI_66M_MODE    BIT(6)
+#define RT3883_SYSCFG1_GPIO2_AS_WDT_OUT        BIT(2)
+
+#define RT3883_CLKCFG1_PCIE_CLK_EN     BIT(21)
+#define RT3883_CLKCFG1_UPHY1_CLK_EN    BIT(20)
+#define RT3883_CLKCFG1_PCI_CLK_EN      BIT(19)
+#define RT3883_CLKCFG1_UPHY0_CLK_EN    BIT(18)
+
+#define RT3883_GPIO_MODE_I2C           BIT(0)
+#define RT3883_GPIO_MODE_SPI           BIT(1)
+#define RT3883_GPIO_MODE_UART0_SHIFT   2
+#define RT3883_GPIO_MODE_UART0_MASK    0x7
+#define RT3883_GPIO_MODE_UART0(x)      ((x) << RT3883_GPIO_MODE_UART0_SHIFT)
+#define RT3883_GPIO_MODE_UARTF         0x0
+#define RT3883_GPIO_MODE_PCM_UARTF     0x1
+#define RT3883_GPIO_MODE_PCM_I2S       0x2
+#define RT3883_GPIO_MODE_I2S_UARTF     0x3
+#define RT3883_GPIO_MODE_PCM_GPIO      0x4
+#define RT3883_GPIO_MODE_GPIO_UARTF    0x5
+#define RT3883_GPIO_MODE_GPIO_I2S      0x6
+#define RT3883_GPIO_MODE_GPIO          0x7
+#define RT3883_GPIO_MODE_UART1         BIT(5)
+#define RT3883_GPIO_MODE_JTAG          BIT(6)
+#define RT3883_GPIO_MODE_MDIO          BIT(7)
+#define RT3883_GPIO_MODE_GE1           BIT(9)
+#define RT3883_GPIO_MODE_GE2           BIT(10)
+#define RT3883_GPIO_MODE_PCI_SHIFT     11
+#define RT3883_GPIO_MODE_PCI_MASK      0x7
+#define RT3883_GPIO_MODE_PCI           (RT3883_GPIO_MODE_PCI_MASK << RT3883_GPIO_MODE_PCI_SHIFT)
+#define RT3883_GPIO_MODE_LNA_A_SHIFT   16
+#define RT3883_GPIO_MODE_LNA_A_MASK    0x3
+#define _RT3883_GPIO_MODE_LNA_A(_x)    ((_x) << RT3883_GPIO_MODE_LNA_A_SHIFT)
+#define RT3883_GPIO_MODE_LNA_A_GPIO    0x3
+#define RT3883_GPIO_MODE_LNA_A         _RT3883_GPIO_MODE_LNA_A(RT3883_GPIO_MODE_LNA_A_MASK)
+#define RT3883_GPIO_MODE_LNA_G_SHIFT   18
+#define RT3883_GPIO_MODE_LNA_G_MASK    0x3
+#define _RT3883_GPIO_MODE_LNA_G(_x)    ((_x) << RT3883_GPIO_MODE_LNA_G_SHIFT)
+#define RT3883_GPIO_MODE_LNA_G_GPIO    0x3
+#define RT3883_GPIO_MODE_LNA_G         _RT3883_GPIO_MODE_LNA_G(RT3883_GPIO_MODE_LNA_G_MASK)
+
+#define RT3883_GPIO_I2C_SD             1
+#define RT3883_GPIO_I2C_SCLK           2
+#define RT3883_GPIO_SPI_CS0            3
+#define RT3883_GPIO_SPI_CLK            4
+#define RT3883_GPIO_SPI_MOSI           5
+#define RT3883_GPIO_SPI_MISO           6
+#define RT3883_GPIO_7                  7
+#define RT3883_GPIO_10                 10
+#define RT3883_GPIO_11                 11
+#define RT3883_GPIO_14                 14
+#define RT3883_GPIO_UART1_TXD          15
+#define RT3883_GPIO_UART1_RXD          16
+#define RT3883_GPIO_JTAG_TDO           17
+#define RT3883_GPIO_JTAG_TDI           18
+#define RT3883_GPIO_JTAG_TMS           19
+#define RT3883_GPIO_JTAG_TCLK          20
+#define RT3883_GPIO_JTAG_TRST_N                21
+#define RT3883_GPIO_MDIO_MDC           22
+#define RT3883_GPIO_MDIO_MDIO          23
+#define RT3883_GPIO_LNA_PE_A0          32
+#define RT3883_GPIO_LNA_PE_A1          33
+#define RT3883_GPIO_LNA_PE_A2          34
+#define RT3883_GPIO_LNA_PE_G0          35
+#define RT3883_GPIO_LNA_PE_G1          36
+#define RT3883_GPIO_LNA_PE_G2          37
+#define RT3883_GPIO_PCI_AD0            40
+#define RT3883_GPIO_PCI_AD31           71
+#define RT3883_GPIO_GE2_TXD0           72
+#define RT3883_GPIO_GE2_TXD1           73
+#define RT3883_GPIO_GE2_TXD2           74
+#define RT3883_GPIO_GE2_TXD3           75
+#define RT3883_GPIO_GE2_TXEN           76
+#define RT3883_GPIO_GE2_TXCLK          77
+#define RT3883_GPIO_GE2_RXD0           78
+#define RT3883_GPIO_GE2_RXD1           79
+#define RT3883_GPIO_GE2_RXD2           80
+#define RT3883_GPIO_GE2_RXD3           81
+#define RT3883_GPIO_GE2_RXDV           82
+#define RT3883_GPIO_GE2_RXCLK          83
+#define RT3883_GPIO_GE1_TXD0           84
+#define RT3883_GPIO_GE1_TXD1           85
+#define RT3883_GPIO_GE1_TXD2           86
+#define RT3883_GPIO_GE1_TXD3           87
+#define RT3883_GPIO_GE1_TXEN           88
+#define RT3883_GPIO_GE1_TXCLK          89
+#define RT3883_GPIO_GE1_RXD0           90
+#define RT3883_GPIO_GE1_RXD1           91
+#define RT3883_GPIO_GE1_RXD2           92
+#define RT3883_GPIO_GE1_RXD3           93
+#define RT3883_GPIO_GE1_RXDV           94
+#define RT3883_GPIO_GE1_RXCLK  95
+
+#define RT3883_RSTCTRL_PCIE_PCI_PDM    BIT(27)
+#define RT3883_RSTCTRL_FLASH           BIT(26)
+#define RT3883_RSTCTRL_UDEV            BIT(25)
+#define RT3883_RSTCTRL_PCI             BIT(24)
+#define RT3883_RSTCTRL_PCIE            BIT(23)
+#define RT3883_RSTCTRL_UHST            BIT(22)
+#define RT3883_RSTCTRL_FE              BIT(21)
+#define RT3883_RSTCTRL_WLAN            BIT(20)
+#define RT3883_RSTCTRL_UART1           BIT(29)
+#define RT3883_RSTCTRL_SPI             BIT(18)
+#define RT3883_RSTCTRL_I2S             BIT(17)
+#define RT3883_RSTCTRL_I2C             BIT(16)
+#define RT3883_RSTCTRL_NAND            BIT(15)
+#define RT3883_RSTCTRL_DMA             BIT(14)
+#define RT3883_RSTCTRL_PIO             BIT(13)
+#define RT3883_RSTCTRL_UART            BIT(12)
+#define RT3883_RSTCTRL_PCM             BIT(11)
+#define RT3883_RSTCTRL_MC              BIT(10)
+#define RT3883_RSTCTRL_INTC            BIT(9)
+#define RT3883_RSTCTRL_TIMER           BIT(8)
+#define RT3883_RSTCTRL_SYS             BIT(0)
+
+#define RT3883_INTC_INT_SYSCTL BIT(0)
+#define RT3883_INTC_INT_TIMER0 BIT(1)
+#define RT3883_INTC_INT_TIMER1 BIT(2)
+#define RT3883_INTC_INT_IA     BIT(3)
+#define RT3883_INTC_INT_PCM    BIT(4)
+#define RT3883_INTC_INT_UART0  BIT(5)
+#define RT3883_INTC_INT_PIO    BIT(6)
+#define RT3883_INTC_INT_DMA    BIT(7)
+#define RT3883_INTC_INT_NAND   BIT(8)
+#define RT3883_INTC_INT_PERFC  BIT(9)
+#define RT3883_INTC_INT_I2S    BIT(10)
+#define RT3883_INTC_INT_UART1  BIT(12)
+#define RT3883_INTC_INT_UHST   BIT(18)
+#define RT3883_INTC_INT_UDEV   BIT(19)
+
+/* FLASH/SRAM/Codec Controller registers */
+#define RT3883_FSCC_REG_FLASH_CFG0     0x00
+#define RT3883_FSCC_REG_FLASH_CFG1     0x04
+#define RT3883_FSCC_REG_CODEC_CFG0     0x40
+#define RT3883_FSCC_REG_CODEC_CFG1     0x44
+
+#define RT3883_FLASH_CFG_WIDTH_SHIFT   26
+#define RT3883_FLASH_CFG_WIDTH_MASK    0x3
+#define RT3883_FLASH_CFG_WIDTH_8BIT    0x0
+#define RT3883_FLASH_CFG_WIDTH_16BIT   0x1
+#define RT3883_FLASH_CFG_WIDTH_32BIT   0x2
+
+#define RT3883_SDRAM_BASE              0x00000000
+#define RT3883_MEM_SIZE_MIN            2
+#define RT3883_MEM_SIZE_MAX            256
+
+#endif /* _RT3883_REGS_H_ */
diff --git a/arch/mips/include/asm/mach-ralink/rt3883/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ralink/rt3883/cpu-feature-overrides.h
new file mode 100644 (file)
index 0000000..181fbf4
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * Ralink RT3662/RT3883 specific CPU feature overrides
+ *
+ * Copyright (C) 2011-2013 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This file was derived from: include/asm-mips/cpu-features.h
+ *     Copyright (C) 2003, 2004 Ralf Baechle
+ *     Copyright (C) 2004 Maciej W. Rozycki
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+#ifndef _RT3883_CPU_FEATURE_OVERRIDES_H
+#define _RT3883_CPU_FEATURE_OVERRIDES_H
+
+#define cpu_has_tlb            1
+#define cpu_has_4kex           1
+#define cpu_has_3k_cache       0
+#define cpu_has_4k_cache       1
+#define cpu_has_tx39_cache     0
+#define cpu_has_sb1_cache      0
+#define cpu_has_fpu            0
+#define cpu_has_32fpr          0
+#define cpu_has_counter                1
+#define cpu_has_watch          1
+#define cpu_has_divec          1
+
+#define cpu_has_prefetch       1
+#define cpu_has_ejtag          1
+#define cpu_has_llsc           1
+
+#define cpu_has_mips16         1
+#define cpu_has_mdmx           0
+#define cpu_has_mips3d         0
+#define cpu_has_smartmips      0
+
+#define cpu_has_mips32r1       1
+#define cpu_has_mips32r2       1
+#define cpu_has_mips64r1       0
+#define cpu_has_mips64r2       0
+
+#define cpu_has_dsp            1
+#define cpu_has_mipsmt         0
+
+#define cpu_has_64bits         0
+#define cpu_has_64bit_zero_reg 0
+#define cpu_has_64bit_gp_regs  0
+#define cpu_has_64bit_addresses        0
+
+#define cpu_dcache_line_size() 32
+#define cpu_icache_line_size() 32
+
+#endif /* _RT3883_CPU_FEATURE_OVERRIDES_H */
index 193c0912d38e651519b8fb8b7bbc9fa31d7893e8..bfbd7035d4c54541787fb7de42cbafe7b1230edb 100644 (file)
 /* #define cpu_has_prefetch    ? */
 #define cpu_has_mcheck         1
 /* #define cpu_has_ejtag       ? */
+#ifdef CONFIG_CPU_MICROMIPS
+#define cpu_has_llsc           0
+#else
 #define cpu_has_llsc           1
+#endif
 /* #define cpu_has_vtag_icache ? */
 /* #define cpu_has_dc_aliases  ? */
 /* #define cpu_has_ic_fills_f_dc ? */
index 44a09a64160ac52fac8862beaaa8a9c9815f3fd2..bd9746fbe4af8a6b098df65c5f386cecec86f2e4 100644 (file)
@@ -83,4 +83,7 @@ extern void mips_pcibios_init(void);
 #define mips_pcibios_init() do { } while (0)
 #endif
 
+extern void mips_scroll_message(void);
+extern void mips_display_message(const char *str);
+
 #endif /* __ASM_MIPS_BOARDS_GENERIC_H */
diff --git a/arch/mips/include/asm/mips-boards/prom.h b/arch/mips/include/asm/mips-boards/prom.h
deleted file mode 100644 (file)
index e7aed3e..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
- *
- * ########################################################################
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * ########################################################################
- *
- * MIPS boards bootprom interface for the Linux kernel.
- *
- */
-
-#ifndef _MIPS_PROM_H
-#define _MIPS_PROM_H
-
-extern char *prom_getcmdline(void);
-extern char *prom_getenv(char *name);
-extern void prom_init_cmdline(void);
-extern void prom_meminit(void);
-extern void prom_fixup_mem_map(unsigned long start_mem, unsigned long end_mem);
-extern void mips_display_message(const char *str);
-extern void mips_display_word(unsigned int num);
-extern void mips_scroll_message(void);
-extern int get_ethernet_addr(char *ethernet_addr);
-
-/* Memory descriptor management. */
-#define PROM_MAX_PMEMBLOCKS    32
-struct prom_pmemblock {
-       unsigned long base; /* Within KSEG0. */
-       unsigned int size;  /* In bytes. */
-       unsigned int type;  /* free or prom memory */
-};
-
-#endif /* !(_MIPS_PROM_H) */
index 363bb352c7f70b4da402ed8725ce383a0f22a6e1..9d00aebe98426c5ab93c44bb0df4b09c1d552b7b 100644 (file)
@@ -42,13 +42,9 @@ extern long __mips_machines_end;
 #ifdef CONFIG_MIPS_MACHINE
 int  mips_machtype_setup(char *id) __init;
 void mips_machine_setup(void) __init;
-void mips_set_machine_name(const char *name) __init;
-char *mips_get_machine_name(void);
 #else
 static inline int mips_machtype_setup(char *id) { return 1; }
 static inline void mips_machine_setup(void) { }
-static inline void mips_set_machine_name(const char *name) { }
-static inline char *mips_get_machine_name(void) { return NULL; }
 #endif /* CONFIG_MIPS_MACHINE */
 
 #endif /* __ASM_MIPS_MACHINE_H */
index 0da44d422f5b0242380bfc0dde4973b547c1900e..87e6207b05e4334f242cadf80b01305811edd3fd 100644 (file)
 #define MIPS_CONF3_RXI         (_ULCAST_(1) << 12)
 #define MIPS_CONF3_ULRI                (_ULCAST_(1) << 13)
 #define MIPS_CONF3_ISA         (_ULCAST_(3) << 14)
+#define MIPS_CONF3_ISA_OE      (_ULCAST_(1) << 16)
 #define MIPS_CONF3_VZ          (_ULCAST_(1) << 23)
 
 #define MIPS_CONF4_MMUSIZEEXT  (_ULCAST_(255) << 0)
 
 #ifndef __ASSEMBLY__
 
+/*
+ * Macros for handling the ISA mode bit for microMIPS.
+ */
+#define get_isa16_mode(x)              ((x) & 0x1)
+#define msk_isa16_mode(x)              ((x) & ~0x1)
+#define set_isa16_mode(x)              do { (x) |= 0x1; } while(0)
+
+/*
+ * microMIPS instructions can be 16-bit or 32-bit in length. This
+ * returns a 1 if the instruction is 16-bit and a 0 if 32-bit.
+ */
+static inline int mm_insn_16bit(u16 insn)
+{
+       u16 opcode = (insn >> 10) & 0x7;
+
+       return (opcode >= 1 && opcode <= 3) ? 1 : 0;
+}
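
The isa16-mode macros above encode the microMIPS execution state in bit 0 of a program-counter value, matching how the hardware reports it in EPC/ErrorEPC (msk_isa16_mode() strips the bit before the address is used for a fetch), and mm_insn_16bit() keys off the major opcode in bits 15:10 of the first halfword. A small hedged example of how the pieces combine; the function name and parameters are illustrative, not taken from this patch:

/* Illustrative only: size of a trapping instruction. */
static unsigned int mm_faulting_insn_size(unsigned long epc, u16 first_halfword)
{
	/* Bit 0 of the exception PC says whether microMIPS was executing. */
	if (!get_isa16_mode(epc))
		return 4;		/* classic MIPS32: always 32-bit */

	/* microMIPS: the major opcode distinguishes 16- and 32-bit forms. */
	return mm_insn_16bit(first_halfword) ? 2 : 4;
}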
+
 /*
  * Functions to access the R10000 performance counters.         These are basically
  * mfc0 and mtc0 instructions from and to coprocessor register with a 5-bit
index e81d719efcd18e9e226d07e8d69e29b970c89280..1554721e4808e7ffc67d61bc87646cbdd5a325be 100644 (file)
 
 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
 
-#define TLBMISS_HANDLER_SETUP_PGD(pgd)                         \
-       tlbmiss_handler_setup_pgd((unsigned long)(pgd))
-
-extern void tlbmiss_handler_setup_pgd(unsigned long pgd);
+#define TLBMISS_HANDLER_SETUP_PGD(pgd)                                 \
+do {                                                                   \
+       void (*tlbmiss_handler_setup_pgd)(unsigned long);               \
+       extern u32 tlbmiss_handler_setup_pgd_array[16];                 \
+                                                                       \
+       tlbmiss_handler_setup_pgd =                                     \
+               (__typeof__(tlbmiss_handler_setup_pgd)) tlbmiss_handler_setup_pgd_array; \
+       tlbmiss_handler_setup_pgd((unsigned long)(pgd));                \
+} while (0)
 
 #define TLBMISS_HANDLER_SETUP()                                                \
        do {                                                            \
@@ -62,59 +67,88 @@ extern unsigned long pgd_current[];
        TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
 #endif
 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/
-#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
-
-#define ASID_INC       0x40
-#define ASID_MASK      0xfc0
-
-#elif defined(CONFIG_CPU_R8000)
-
-#define ASID_INC       0x10
-#define ASID_MASK      0xff0
 
-#elif defined(CONFIG_MIPS_MT_SMTC)
-
-#define ASID_INC       0x1
-extern unsigned long smtc_asid_mask;
-#define ASID_MASK      (smtc_asid_mask)
-#define HW_ASID_MASK   0xff
-/* End SMTC/34K debug hack */
-#else /* FIXME: not correct for R6000 */
-
-#define ASID_INC       0x1
-#define ASID_MASK      0xff
+#define ASID_INC(asid)                                         \
+({                                                             \
+       unsigned long __asid = asid;                            \
+       __asm__("1:\taddiu\t%0,1\t\t\t\t# patched\n\t"          \
+       ".section\t__asid_inc,\"a\"\n\t"                        \
+       ".word\t1b\n\t"                                         \
+       ".previous"                                             \
+       :"=r" (__asid)                                          \
+       :"0" (__asid));                                         \
+       __asid;                                                 \
+})
+#define ASID_MASK(asid)                                                \
+({                                                             \
+       unsigned long __asid = asid;                            \
+       __asm__("1:\tandi\t%0,%1,0xfc0\t\t\t# patched\n\t"      \
+       ".section\t__asid_mask,\"a\"\n\t"                       \
+       ".word\t1b\n\t"                                         \
+       ".previous"                                             \
+       :"=r" (__asid)                                          \
+       :"r" (__asid));                                         \
+       __asid;                                                 \
+})
+#define ASID_VERSION_MASK                                      \
+({                                                             \
+       unsigned long __asid;                                   \
+       __asm__("1:\taddiu\t%0,$0,0xff00\t\t\t\t# patched\n\t"  \
+       ".section\t__asid_version_mask,\"a\"\n\t"               \
+       ".word\t1b\n\t"                                         \
+       ".previous"                                             \
+       :"=r" (__asid));                                        \
+       __asid;                                                 \
+})
+#define ASID_FIRST_VERSION                                     \
+({                                                             \
+       unsigned long __asid;                                   \
+       __asm__("1:\tli\t%0,0x100\t\t\t\t# patched\n\t"         \
+       ".section\t__asid_first_version,\"a\"\n\t"              \
+       ".word\t1b\n\t"                                         \
+       ".previous"                                             \
+       :"=r" (__asid));                                        \
+       __asid;                                                 \
+})
+
+#define ASID_FIRST_VERSION_R3000       0x1000
+#define ASID_FIRST_VERSION_R4000       0x100
+#define ASID_FIRST_VERSION_R8000       0x1000
+#define ASID_FIRST_VERSION_RM9000      0x1000
 
+#ifdef CONFIG_MIPS_MT_SMTC
+#define SMTC_HW_ASID_MASK              0xff
+extern unsigned int smtc_asid_mask;
 #endif
 
 #define cpu_context(cpu, mm)   ((mm)->context.asid[cpu])
-#define cpu_asid(cpu, mm)      (cpu_context((cpu), (mm)) & ASID_MASK)
+#define cpu_asid(cpu, mm)      ASID_MASK(cpu_context((cpu), (mm)))
 #define asid_cache(cpu)                (cpu_data[cpu].asid_cache)
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
 
-/*
- *  All unused by hardware upper bits will be considered
- *  as a software asid extension.
- */
-#define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
-#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
-
 #ifndef CONFIG_MIPS_MT_SMTC
 /* Normal, classic MIPS get_new_mmu_context */
 static inline void
 get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
 {
+       extern void kvm_local_flush_tlb_all(void);
        unsigned long asid = asid_cache(cpu);
 
-       if (! ((asid += ASID_INC) & ASID_MASK) ) {
+       if (!ASID_MASK((asid = ASID_INC(asid)))) {
                if (cpu_has_vtag_icache)
                        flush_icache_all();
+#ifdef CONFIG_VIRTUALIZATION
+               kvm_local_flush_tlb_all();      /* start new asid cycle */
+#else
                local_flush_tlb_all();  /* start new asid cycle */
+#endif
                if (!asid)              /* fix version if needed */
                        asid = ASID_FIRST_VERSION;
        }
+
        cpu_context(cpu, mm) = asid_cache(cpu) = asid;
 }
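
The ASID_INC()/ASID_MASK() replacements above are self-patching: each expands to a single instruction plus an __asid_* section entry recording that instruction's address, so early boot can rewrite the immediate once the CPU's actual ASID width is known, instead of fixing it at compile time per CONFIG_CPU_* as the removed code did. Conceptually, after patching they behave like the plain helpers below; the values come from the constants in the removed lines and the function names are illustrative:

/* Conceptual equivalent of the patched sequences, ignoring the
 * run-time patching itself. */
static inline unsigned long asid_inc_r4k(unsigned long asid)
{
	return asid + 1;	/* ASID_INC(): 0x40 on R3000-style TLBs */
}

static inline unsigned long asid_mask_r4k(unsigned long asid)
{
	return asid & 0xff;	/* ASID_MASK(): 0xfc0 on R3000, 0xff0 on R8000 */
}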
 
@@ -133,7 +167,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
        int i;
 
-       for_each_online_cpu(i)
+       for_each_possible_cpu(i)
                cpu_context(i, mm) = 0;
 
        return 0;
@@ -166,7 +200,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
         * free up the ASID value for use and flush any old
         * instances of it from the TLB.
         */
-       oldasid = (read_c0_entryhi() & ASID_MASK);
+       oldasid = ASID_MASK(read_c0_entryhi());
        if(smtc_live_asid[mytlb][oldasid]) {
                smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
                if(smtc_live_asid[mytlb][oldasid] == 0)
@@ -177,7 +211,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
         * having ASID_MASK smaller than the hardware maximum,
         * make sure no "soft" bits become "hard"...
         */
-       write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
+       write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) |
                         cpu_asid(cpu, next));
        ehb(); /* Make sure it propagates to TCStatus */
        evpe(mtflags);
@@ -230,15 +264,15 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
 #ifdef CONFIG_MIPS_MT_SMTC
        /* See comments for similar code above */
        mtflags = dvpe();
-       oldasid = read_c0_entryhi() & ASID_MASK;
+       oldasid = ASID_MASK(read_c0_entryhi());
        if(smtc_live_asid[mytlb][oldasid]) {
                smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
                if(smtc_live_asid[mytlb][oldasid] == 0)
                         smtc_flush_tlb_asid(oldasid);
        }
        /* See comments for similar code above */
-       write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
-                        cpu_asid(cpu, next));
+       write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) |
+                        cpu_asid(cpu, next));
        ehb(); /* Make sure it propagates to TCStatus */
        evpe(mtflags);
 #else
@@ -275,14 +309,14 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
 #ifdef CONFIG_MIPS_MT_SMTC
                /* See comments for similar code above */
                prevvpe = dvpe();
-               oldasid = (read_c0_entryhi() & ASID_MASK);
+               oldasid = ASID_MASK(read_c0_entryhi());
                if (smtc_live_asid[mytlb][oldasid]) {
                        smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
                        if(smtc_live_asid[mytlb][oldasid] == 0)
                                smtc_flush_tlb_asid(oldasid);
                }
                /* See comments for similar code above */
-               write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK)
+               write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK)
                                | cpu_asid(cpu, mm));
                ehb(); /* Make sure it propagates to TCStatus */
                evpe(prevvpe);
index 419d8aef8569333418f68f61e88144d1e0b37285..79c7cccdc22ca5b688cfe6f5bdb125f17b37a0dd 100644 (file)
 #ifndef __NLM_HAL_HALDEFS_H__
 #define __NLM_HAL_HALDEFS_H__
 
+#include <linux/irqflags.h>    /* for local_irq_disable */
+					/* (used for local_irq_save/restore below) */
+
 /*
  * This file contains platform specific memory mapped IO implementation
  * and will provide a way to read 32/64 bit memory mapped registers in
  * all ABIs
  */
-#if !defined(CONFIG_64BIT) && defined(CONFIG_CPU_XLP)
-#error "o32 compile not supported on XLP yet"
-#endif
-/*
- * For o32 compilation, we have to disable interrupts and enable KX bit to
- * access 64 bit addresses or data.
- *
- * We need to disable interrupts because we save just the lower 32 bits of
- * registers in         interrupt handling. So if we get hit by an interrupt while
- * using the upper 32 bits of a register, we lose.
- */
-static inline uint32_t nlm_save_flags_kx(void)
-{
-       return change_c0_status(ST0_KX | ST0_IE, ST0_KX);
-}
-
-static inline uint32_t nlm_save_flags_cop2(void)
-{
-       return change_c0_status(ST0_CU2 | ST0_IE, ST0_CU2);
-}
-
-static inline void nlm_restore_flags(uint32_t sr)
-{
-       write_c0_status(sr);
-}
-
-/*
- * The n64 implementations are simple, the o32 implementations when they
- * are added, will have to disable interrupts and enable KX before doing
- * 64 bit ops.
- */
 static inline uint32_t
 nlm_read_reg(uint64_t base, uint32_t reg)
 {
@@ -87,13 +58,40 @@ nlm_write_reg(uint64_t base, uint32_t reg, uint32_t val)
        *addr = val;
 }
 
+/*
+ * For o32 compilation, we have to disable interrupts to access 64-bit
+ * registers.
+ *
+ * We need to disable interrupts because we save just the lower 32 bits of
+ * registers in interrupt handling. So if we get hit by an interrupt while
+ * using the upper 32 bits of a register, we lose.
+ */
+
 static inline uint64_t
 nlm_read_reg64(uint64_t base, uint32_t reg)
 {
        uint64_t addr = base + (reg >> 1) * sizeof(uint64_t);
        volatile uint64_t *ptr = (volatile uint64_t *)(long)addr;
-
-       return *ptr;
+       uint64_t val;
+
+       if (sizeof(unsigned long) == 4) {
+               unsigned long flags;
+
+               local_irq_save(flags);
+               __asm__ __volatile__(
+                       ".set   push"                   "\n\t"
+                       ".set   mips64"                 "\n\t"
+                       "ld     %L0, %1"                "\n\t"
+                       "dsra32 %M0, %L0, 0"            "\n\t"
+                       "sll    %L0, %L0, 0"            "\n\t"
+                       ".set   pop"                    "\n"
+                       : "=r" (val)
+                       : "m" (*ptr));
+               local_irq_restore(flags);
+       } else
+               val = *ptr;
+
+       return val;
 }
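Usage sketch (the offset and function name are invented for illustration, not part of this patch): a 64-bit memory-mapped register is read the same way in o32 and n64 builds, with the asm above using GCC's %L0/%M0 modifiers to name the registers holding the low and high words of the 64-bit result in the o32 case:

	static inline uint64_t example_read_reg64(uint64_t pcibase)
	{
		/* register index 4 is made up for the example */
		return nlm_read_reg64(pcibase, 4);
	}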
 
 static inline void
@@ -102,7 +100,25 @@ nlm_write_reg64(uint64_t base, uint32_t reg, uint64_t val)
        uint64_t addr = base + (reg >> 1) * sizeof(uint64_t);
        volatile uint64_t *ptr = (volatile uint64_t *)(long)addr;
 
-       *ptr = val;
+       if (sizeof(unsigned long) == 4) {
+               unsigned long flags;
+               uint64_t tmp;
+
+               local_irq_save(flags);
+               __asm__ __volatile__(
+                       ".set   push"                   "\n\t"
+                       ".set   mips64"                 "\n\t"
+                       "dsll32 %L0, %L0, 0"            "\n\t"
+                       "dsrl32 %L0, %L0, 0"            "\n\t"
+                       "dsll32 %M0, %M0, 0"            "\n\t"
+                       "or     %L0, %L0, %M0"          "\n\t"
+                       "sd     %L0, %2"                "\n\t"
+                       ".set   pop"                    "\n"
+                       : "=r" (tmp)
+                       : "0" (val), "m" (*ptr));
+               local_irq_restore(flags);
+       } else
+               *ptr = val;
 }
 
 /*
@@ -143,14 +159,6 @@ nlm_pcicfg_base(uint32_t devoffset)
        return nlm_io_base + devoffset;
 }
 
-static inline uint64_t
-nlm_xkphys_map_pcibar0(uint64_t pcibase)
-{
-       uint64_t paddr;
-
-       paddr = nlm_read_reg(pcibase, 0x4) & ~0xfu;
-       return (uint64_t)0x9000000000000000 | paddr;
-}
 #elif defined(CONFIG_CPU_XLR)
 
 static inline uint64_t
index 8ad2e0f81719814f077659d48c46a3b4f87c578d..f299d31d7c1a3faf7eeffd22f7409cce90954cd6 100644 (file)
 /*
  * XLR and XLP interrupt request and interrupt mask registers
  */
-#define read_c0_eirr()         __read_64bit_c0_register($9, 6)
-#define read_c0_eimr()         __read_64bit_c0_register($9, 7)
-#define write_c0_eirr(val)     __write_64bit_c0_register($9, 6, val)
-
 /*
- * Writing EIMR in 32 bit is a special case, the lower 8 bit of the
- * EIMR is shadowed in the status register, so we cannot save and
- * restore status register for split read.
+ * NOTE: Do not save/restore flags around write_c0_eimr().
+ * On non-R2 platforms the saved flags contain part of EIMR, which is shadowed
+ * in the STATUS register; restoring the flags would overwrite its lower 8 bits.
+ *
+ * Call with interrupts disabled.
  */
 #define write_c0_eimr(val)                                             \
 do {                                                                   \
        if (sizeof(unsigned long) == 4) {                               \
-               unsigned long __flags;                                  \
-                                                                       \
-               local_irq_save(__flags);                                \
                __asm__ __volatile__(                                   \
                        ".set\tmips64\n\t"                              \
                        "dsll\t%L0, %L0, 32\n\t"                        \
@@ -62,8 +57,6 @@ do {                                                                  \
                        "dmtc0\t%L0, $9, 7\n\t"                         \
                        ".set\tmips0"                                   \
                        : : "r" (val));                                 \
-               __flags = (__flags & 0xffff00ff) | (((val) & 0xff) << 8);\
-               local_irq_restore(__flags);                             \
        } else                                                          \
                __write_64bit_c0_register($9, 7, (val));                \
 } while (0)
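Caller sketch for the rule above (the shadow variable and function are invented for illustration; the in-tree irq code keeps its own per-CPU state): the mask is rebuilt from a software copy and written while interrupts are already disabled, with no flag save/restore around the write:

	static uint64_t eimr_shadow;	/* hypothetical software copy of EIMR */

	static void example_unmask_irq(int irq)	/* call with IRQs disabled */
	{
		eimr_shadow |= 1ull << irq;
		write_c0_eimr(eimr_shadow);
	}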
@@ -128,7 +121,7 @@ static inline uint64_t read_c0_eirr_and_eimr(void)
        uint64_t val;
 
 #ifdef CONFIG_64BIT
-       val = read_c0_eimr() & read_c0_eirr();
+       val = __read_64bit_c0_register($9, 6) & __read_64bit_c0_register($9, 7);
 #else
        __asm__ __volatile__(
                ".set   push\n\t"
@@ -143,7 +136,6 @@ static inline uint64_t read_c0_eirr_and_eimr(void)
                ".set   pop"
                : "=r" (val));
 #endif
-
        return val;
 }
 
index 3df53017fe513f29b8ee99a6ce2ff11ad78b198c..a981f4681a154a53fb6fed0bffc788be83ed24a3 100644 (file)
 #define PIC_IRT_PCIE_LINK_2_INDEX      80
 #define PIC_IRT_PCIE_LINK_3_INDEX      81
 #define PIC_IRT_PCIE_LINK_INDEX(num)   ((num) + PIC_IRT_PCIE_LINK_0_INDEX)
-/* 78 to 81 */
-#define PIC_NUM_NA_IRTS                        32
-/* 82 to 113 */
-#define PIC_IRT_NA_0_INDEX             82
-#define PIC_IRT_NA_INDEX(num)          ((num) + PIC_IRT_NA_0_INDEX)
-#define PIC_IRT_POE_INDEX              114
-
-#define PIC_NUM_USB_IRTS               6
-#define PIC_IRT_USB_0_INDEX            115
-#define PIC_IRT_EHCI_0_INDEX           115
-#define PIC_IRT_OHCI_0_INDEX           116
-#define PIC_IRT_OHCI_1_INDEX           117
-#define PIC_IRT_EHCI_1_INDEX           118
-#define PIC_IRT_OHCI_2_INDEX           119
-#define PIC_IRT_OHCI_3_INDEX           120
-#define PIC_IRT_USB_INDEX(num)         ((num) + PIC_IRT_USB_0_INDEX)
-/* 115 to 120 */
-#define PIC_IRT_GDX_INDEX              121
-#define PIC_IRT_SEC_INDEX              122
-#define PIC_IRT_RSA_INDEX              123
-
-#define PIC_NUM_COMP_IRTS              4
-#define PIC_IRT_COMP_0_INDEX           124
-#define PIC_IRT_COMP_INDEX(num)                ((num) + PIC_IRT_COMP_0_INDEX)
-/* 124 to 127 */
-#define PIC_IRT_GBU_INDEX              128
-#define PIC_IRT_ICC_0_INDEX            129 /* ICC - Inter Chip Coherency */
-#define PIC_IRT_ICC_1_INDEX            130
-#define PIC_IRT_ICC_2_INDEX            131
-#define PIC_IRT_CAM_INDEX              132
-#define PIC_IRT_UART_0_INDEX           133
-#define PIC_IRT_UART_1_INDEX           134
-#define PIC_IRT_I2C_0_INDEX            135
-#define PIC_IRT_I2C_1_INDEX            136
-#define PIC_IRT_SYS_0_INDEX            137
-#define PIC_IRT_SYS_1_INDEX            138
-#define PIC_IRT_JTAG_INDEX             139
-#define PIC_IRT_PIC_INDEX              140
-#define PIC_IRT_NBU_INDEX              141
-#define PIC_IRT_TCU_INDEX              142
-#define PIC_IRT_GCU_INDEX              143 /* GBC - Global Coherency */
-#define PIC_IRT_DMC_0_INDEX            144
-#define PIC_IRT_DMC_1_INDEX            145
-
-#define PIC_NUM_GPIO_IRTS              4
-#define PIC_IRT_GPIO_0_INDEX           146
-#define PIC_IRT_GPIO_INDEX(num)                ((num) + PIC_IRT_GPIO_0_INDEX)
-
-/* 146 to 149 */
-#define PIC_IRT_NOR_INDEX              150
-#define PIC_IRT_NAND_INDEX             151
-#define PIC_IRT_SPI_INDEX              152
-#define PIC_IRT_MMC_INDEX              153
 
 #define PIC_CLOCK_TIMER                        7
 #define PIC_IRQ_BASE                   8
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/usb.h b/arch/mips/include/asm/netlogic/xlp-hal/usb.h
deleted file mode 100644 (file)
index a9cd350..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 2003-2012 Broadcom Corporation
- * All Rights Reserved
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the Broadcom
- * license below:
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
- * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __NLM_HAL_USB_H__
-#define __NLM_HAL_USB_H__
-
-#define USB_CTL_0                      0x01
-#define USB_PHY_0                      0x0A
-#define USB_PHY_RESET                  0x01
-#define USB_PHY_PORT_RESET_0           0x10
-#define USB_PHY_PORT_RESET_1           0x20
-#define USB_CONTROLLER_RESET           0x01
-#define USB_INT_STATUS                 0x0E
-#define USB_INT_EN                     0x0F
-#define USB_PHY_INTERRUPT_EN           0x01
-#define USB_OHCI_INTERRUPT_EN          0x02
-#define USB_OHCI_INTERRUPT1_EN         0x04
-#define USB_OHCI_INTERRUPT2_EN         0x08
-#define USB_CTRL_INTERRUPT_EN          0x10
-
-#ifndef __ASSEMBLY__
-
-#define nlm_read_usb_reg(b, r)                 nlm_read_reg(b, r)
-#define nlm_write_usb_reg(b, r, v)             nlm_write_reg(b, r, v)
-#define nlm_get_usb_pcibase(node, inst)                \
-       nlm_pcicfg_base(XLP_IO_USB_OFFSET(node, inst))
-#define nlm_get_usb_hcd_base(node, inst)       \
-       nlm_xkphys_map_pcibar0(nlm_get_usb_pcibase(node, inst))
-#define nlm_get_usb_regbase(node, inst)                \
-       (nlm_get_usb_pcibase(node, inst) + XLP_IO_PCI_HDRSZ)
-
-#endif
-#endif /* __NLM_HAL_USB_H__ */
index fdc62fb5630da68bb746ed3dac53b45eb5bddc37..8b8f6b39336350b8c570b30d990f4b16332e29db 100644 (file)
@@ -8,6 +8,7 @@
 #ifndef _ASM_PGTABLE_H
 #define _ASM_PGTABLE_H
 
+#include <linux/mm_types.h>
 #include <linux/mmzone.h>
 #ifdef CONFIG_32BIT
 #include <asm/pgtable-32.h>
index 2a5fa7abb346b8633ddda81ff40766c0e2e47aef..71686c897deaa2651bbb5808ae4192f57ea1f54f 100644 (file)
@@ -44,11 +44,16 @@ extern unsigned int vced_count, vcei_count;
 #define SPECIAL_PAGES_SIZE PAGE_SIZE
 
 #ifdef CONFIG_32BIT
+#ifdef CONFIG_KVM_GUEST
+/* User space process size is limited to 1GB in KVM Guest Mode */
+#define TASK_SIZE      0x3fff8000UL
+#else
 /*
  * User space process size: 2GB. This is hardcoded into a few places,
  * so don't change it unless you know what you are doing.
  */
 #define TASK_SIZE      0x7fff8000UL
+#endif
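For reference, the guest limit mirrors the default one at half the size; both sit 32 KiB below their address-space boundary (informational arithmetic only):

	/*
	 * 0x7fff8000 = 2 GiB - 32 KiB   (default 32-bit TASK_SIZE)
	 * 0x3fff8000 = 1 GiB - 32 KiB   (KVM guest TASK_SIZE)
	 */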
 
 #ifdef __KERNEL__
 #define STACK_TOP_MAX  TASK_SIZE
index 8808bf548b99d2e0b4966b162f22ed4505d046e0..1e7e0961064bdcdbeba50f3fdce48b1c7cc9029d 100644 (file)
@@ -48,4 +48,7 @@ extern void __dt_setup_arch(struct boot_param_header *bph);
 static inline void device_tree_init(void) { }
 #endif /* CONFIG_OF */
 
+extern char *mips_get_machine_name(void);
+extern void mips_set_machine_name(const char *name);
+
 #endif /* __ASM_PROM_H */
index 1a2c3025bf2892ba98feb2a338ae2cf18c5c3e4c..fdfae43d8b99e49b4f6b20565f6e97178f00a216 100644 (file)
@@ -14,6 +14,6 @@ extern void install_cpu_nmi_handler(int slice);
 extern void install_ipi(void);
 extern void setup_replication_mask(void);
 extern void replicate_kernel_text(void);
-extern pfn_t node_getfirstfree(cnodeid_t);
+extern unsigned long node_getfirstfree(cnodeid_t);
 
 #endif /* __ASM_SN_SN_PRIVATE_H */
index c4813d67aec3f5543272a53e1d87e9a1b202e193..6d24d4e8b9ed855cb27a860133e1a541c91870ab 100644 (file)
@@ -19,7 +19,6 @@ typedef signed char   partid_t;       /* partition ID type */
 typedef signed short   moduleid_t;     /* user-visible module number type */
 typedef signed short   cmoduleid_t;    /* kernel compact module id type */
 typedef unsigned char  clusterid_t;    /* Clusterid of the cell */
-typedef unsigned long  pfn_t;
 
 typedef dev_t          vertex_hdl_t;   /* hardware graph vertex handle */
 
index 5130c88d64204b398be70c968823d408bb27f93c..78d201fb6c87c93608b8295327277a6e8804818e 100644 (file)
@@ -71,7 +71,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
                "        nop                                            \n"
                "       srl     %[my_ticket], %[ticket], 16             \n"
                "       andi    %[ticket], %[ticket], 0xffff            \n"
-               "       andi    %[my_ticket], %[my_ticket], 0xffff      \n"
                "       bne     %[ticket], %[my_ticket], 4f             \n"
                "        subu   %[ticket], %[my_ticket], %[ticket]      \n"
                "2:                                                     \n"
@@ -105,7 +104,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
                "       beqz    %[my_ticket], 1b                        \n"
                "        srl    %[my_ticket], %[ticket], 16             \n"
                "       andi    %[ticket], %[ticket], 0xffff            \n"
-               "       andi    %[my_ticket], %[my_ticket], 0xffff      \n"
                "       bne     %[ticket], %[my_ticket], 4f             \n"
                "        subu   %[ticket], %[my_ticket], %[ticket]      \n"
                "2:                                                     \n"
@@ -153,7 +151,6 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
                "                                                       \n"
                "1:     ll      %[ticket], %[ticket_ptr]                \n"
                "       srl     %[my_ticket], %[ticket], 16             \n"
-               "       andi    %[my_ticket], %[my_ticket], 0xffff      \n"
                "       andi    %[now_serving], %[ticket], 0xffff       \n"
                "       bne     %[my_ticket], %[now_serving], 3f        \n"
                "        addu   %[ticket], %[ticket], %[inc]            \n"
@@ -178,7 +175,6 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
                "                                                       \n"
                "1:     ll      %[ticket], %[ticket_ptr]                \n"
                "       srl     %[my_ticket], %[ticket], 16             \n"
-               "       andi    %[my_ticket], %[my_ticket], 0xffff      \n"
                "       andi    %[now_serving], %[ticket], 0xffff       \n"
                "       bne     %[my_ticket], %[now_serving], 3f        \n"
                "        addu   %[ticket], %[ticket], %[inc]            \n"
@@ -242,25 +238,16 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
                : "m" (rw->lock)
                : "memory");
        } else {
-               __asm__ __volatile__(
-               "       .set    noreorder       # arch_read_lock        \n"
-               "1:     ll      %1, %2                                  \n"
-               "       bltz    %1, 3f                                  \n"
-               "        addu   %1, 1                                   \n"
-               "2:     sc      %1, %0                                  \n"
-               "       beqz    %1, 1b                                  \n"
-               "        nop                                            \n"
-               "       .subsection 2                                   \n"
-               "3:     ll      %1, %2                                  \n"
-               "       bltz    %1, 3b                                  \n"
-               "        addu   %1, 1                                   \n"
-               "       b       2b                                      \n"
-               "        nop                                            \n"
-               "       .previous                                       \n"
-               "       .set    reorder                                 \n"
-               : "=m" (rw->lock), "=&r" (tmp)
-               : "m" (rw->lock)
-               : "memory");
+               do {
+                       __asm__ __volatile__(
+                       "1:     ll      %1, %2  # arch_read_lock        \n"
+                       "       bltz    %1, 1b                          \n"
+                       "        addu   %1, 1                           \n"
+                       "2:     sc      %1, %0                          \n"
+                       : "=m" (rw->lock), "=&r" (tmp)
+                       : "m" (rw->lock)
+                       : "memory");
+               } while (unlikely(!tmp));
        }
 
        smp_llsc_mb();
@@ -285,21 +272,15 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
                : "m" (rw->lock)
                : "memory");
        } else {
-               __asm__ __volatile__(
-               "       .set    noreorder       # arch_read_unlock      \n"
-               "1:     ll      %1, %2                                  \n"
-               "       sub     %1, 1                                   \n"
-               "       sc      %1, %0                                  \n"
-               "       beqz    %1, 2f                                  \n"
-               "        nop                                            \n"
-               "       .subsection 2                                   \n"
-               "2:     b       1b                                      \n"
-               "        nop                                            \n"
-               "       .previous                                       \n"
-               "       .set    reorder                                 \n"
-               : "=m" (rw->lock), "=&r" (tmp)
-               : "m" (rw->lock)
-               : "memory");
+               do {
+                       __asm__ __volatile__(
+                       "1:     ll      %1, %2  # arch_read_unlock      \n"
+                       "       sub     %1, 1                           \n"
+                       "       sc      %1, %0                          \n"
+                       : "=m" (rw->lock), "=&r" (tmp)
+                       : "m" (rw->lock)
+                       : "memory");
+               } while (unlikely(!tmp));
        }
 }
 
@@ -321,25 +302,16 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
                : "m" (rw->lock)
                : "memory");
        } else {
-               __asm__ __volatile__(
-               "       .set    noreorder       # arch_write_lock       \n"
-               "1:     ll      %1, %2                                  \n"
-               "       bnez    %1, 3f                                  \n"
-               "        lui    %1, 0x8000                              \n"
-               "2:     sc      %1, %0                                  \n"
-               "       beqz    %1, 3f                                  \n"
-               "        nop                                            \n"
-               "       .subsection 2                                   \n"
-               "3:     ll      %1, %2                                  \n"
-               "       bnez    %1, 3b                                  \n"
-               "        lui    %1, 0x8000                              \n"
-               "       b       2b                                      \n"
-               "        nop                                            \n"
-               "       .previous                                       \n"
-               "       .set    reorder                                 \n"
-               : "=m" (rw->lock), "=&r" (tmp)
-               : "m" (rw->lock)
-               : "memory");
+               do {
+                       __asm__ __volatile__(
+                       "1:     ll      %1, %2  # arch_write_lock       \n"
+                       "       bnez    %1, 1b                          \n"
+                       "        lui    %1, 0x8000                      \n"
+                       "2:     sc      %1, %0                          \n"
+                       : "=m" (rw->lock), "=&r" (tmp)
+                       : "m" (rw->lock)
+                       : "memory");
+               } while (unlikely(!tmp));
        }
 
        smp_llsc_mb();
@@ -424,25 +396,21 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
                : "m" (rw->lock)
                : "memory");
        } else {
-               __asm__ __volatile__(
-               "       .set    noreorder       # arch_write_trylock    \n"
-               "       li      %2, 0                                   \n"
-               "1:     ll      %1, %3                                  \n"
-               "       bnez    %1, 2f                                  \n"
-               "       lui     %1, 0x8000                              \n"
-               "       sc      %1, %0                                  \n"
-               "       beqz    %1, 3f                                  \n"
-               "        li     %2, 1                                   \n"
-               "2:                                                     \n"
-               __WEAK_LLSC_MB
-               "       .subsection 2                                   \n"
-               "3:     b       1b                                      \n"
-               "        li     %2, 0                                   \n"
-               "       .previous                                       \n"
-               "       .set    reorder                                 \n"
-               : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
-               : "m" (rw->lock)
-               : "memory");
+               do {
+                       __asm__ __volatile__(
+                       "       ll      %1, %3  # arch_write_trylock    \n"
+                       "       li      %2, 0                           \n"
+                       "       bnez    %1, 2f                          \n"
+                       "       lui     %1, 0x8000                      \n"
+                       "       sc      %1, %0                          \n"
+                       "       li      %2, 1                           \n"
+                       "2:                                             \n"
+                       : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
+                       : "m" (rw->lock)
+                       : "memory");
+               } while (unlikely(!tmp));
+
+               smp_llsc_mb();
        }
 
        return ret;
index c99384018161ec77fe54e72f1ae851b2b04c8ac3..a89d1b10d027ce65d83eecb9dd57bfda7859400e 100644 (file)
 1:             move    ra, k0
                li      k0, 3
                mtc0    k0, $22
-#endif /* CONFIG_CPU_LOONGSON2F */
+#endif /* CONFIG_CPU_JUMP_WORKAROUNDS */
 #if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
                lui     k1, %hi(kernelsp)
 #else
                LONG_S  $0, PT_R0(sp)
                mfc0    v1, CP0_STATUS
                LONG_S  $2, PT_R2(sp)
+               LONG_S  v1, PT_STATUS(sp)
 #ifdef CONFIG_MIPS_MT_SMTC
                /*
                 * Ideally, these instructions would be shuffled in
                LONG_S  k0, PT_TCSTATUS(sp)
 #endif /* CONFIG_MIPS_MT_SMTC */
                LONG_S  $4, PT_R4(sp)
-               LONG_S  $5, PT_R5(sp)
-               LONG_S  v1, PT_STATUS(sp)
                mfc0    v1, CP0_CAUSE
-               LONG_S  $6, PT_R6(sp)
-               LONG_S  $7, PT_R7(sp)
+               LONG_S  $5, PT_R5(sp)
                LONG_S  v1, PT_CAUSE(sp)
+               LONG_S  $6, PT_R6(sp)
                MFC0    v1, CP0_EPC
+               LONG_S  $7, PT_R7(sp)
 #ifdef CONFIG_64BIT
                LONG_S  $8, PT_R8(sp)
                LONG_S  $9, PT_R9(sp)
 #endif
+               LONG_S  v1, PT_EPC(sp)
                LONG_S  $25, PT_R25(sp)
                LONG_S  $28, PT_R28(sp)
                LONG_S  $31, PT_R31(sp)
-               LONG_S  v1, PT_EPC(sp)
                ori     $28, sp, _THREAD_MASK
                xori    $28, _THREAD_MASK
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
index 178f7924149ad4b5e06ff4aa1a1db868734a10d8..895320e25662cd98a673980c449ec916841920d7 100644 (file)
@@ -58,8 +58,12 @@ struct thread_info {
 #define init_stack             (init_thread_union.stack)
 
 /* How to get the thread information struct from C.  */
-register struct thread_info *__current_thread_info __asm__("$28");
-#define current_thread_info()  __current_thread_info
+static inline struct thread_info *current_thread_info(void)
+{
+       register struct thread_info *__current_thread_info __asm__("$28");
+
+       return __current_thread_info;
+}
 
 #endif /* !__ASSEMBLY__ */
 
index debc8009bd5814e1f22dda89c63fd2eda3497202..2d7b9df4542dd478d53f53d8151d3381d1c58aee 100644 (file)
@@ -52,13 +52,15 @@ extern int (*perf_irq)(void);
  */
 extern unsigned int __weak get_c0_compare_int(void);
 extern int r4k_clockevent_init(void);
+extern int smtc_clockevent_init(void);
+extern int gic_clockevent_init(void);
 
 static inline int mips_clockevent_init(void)
 {
 #ifdef CONFIG_MIPS_MT_SMTC
-       extern int smtc_clockevent_init(void);
-
        return smtc_clockevent_init();
+#elif defined(CONFIG_CEVT_GIC)
+       return (gic_clockevent_init() | r4k_clockevent_init());
 #elif defined(CONFIG_CEVT_R4K)
        return r4k_clockevent_init();
 #else
@@ -69,9 +71,7 @@ static inline int mips_clockevent_init(void)
 /*
  * Initialize the count register as a clocksource
  */
-#ifdef CONFIG_CSRC_R4K
 extern int init_r4k_clocksource(void);
-#endif
 
 static inline int init_mips_clocksource(void)
 {
index bd87e36bf26a3cc980c73804bbb8a5d009f4d2ef..f3fa3750f577c2414396943871a8f0bd6df6b928 100644 (file)
  */
 #ifdef CONFIG_32BIT
 
-#define __UA_LIMIT     0x80000000UL
+#ifdef CONFIG_KVM_GUEST
+#define __UA_LIMIT 0x40000000UL
+#else
+#define __UA_LIMIT 0x80000000UL
+#endif
 
 #define __UA_ADDR      ".word"
 #define __UA_LA                "la"
@@ -55,8 +59,13 @@ extern u64 __ua_limit;
  * address in this range it's the process's problem, not ours :-)
  */
 
+#ifdef CONFIG_KVM_GUEST
+#define KERNEL_DS      ((mm_segment_t) { 0x80000000UL })
+#define USER_DS                ((mm_segment_t) { 0xC0000000UL })
+#else
 #define KERNEL_DS      ((mm_segment_t) { 0UL })
 #define USER_DS                ((mm_segment_t) { __UA_LIMIT })
+#endif
 
 #define VERIFY_READ    0
 #define VERIFY_WRITE   1
@@ -261,6 +270,7 @@ do {                                                                        \
        __asm__ __volatile__(                                           \
        "1:     " insn "        %1, %3                          \n"     \
        "2:                                                     \n"     \
+       "       .insn                                           \n"     \
        "       .section .fixup,\"ax\"                          \n"     \
        "3:     li      %0, %4                                  \n"     \
        "       j       2b                                      \n"     \
@@ -287,7 +297,9 @@ do {                                                                        \
        __asm__ __volatile__(                                           \
        "1:     lw      %1, (%3)                                \n"     \
        "2:     lw      %D1, 4(%3)                              \n"     \
-       "3:     .section        .fixup,\"ax\"                   \n"     \
+       "3:                                                     \n"     \
+       "       .insn                                           \n"     \
+       "       .section        .fixup,\"ax\"                   \n"     \
        "4:     li      %0, %4                                  \n"     \
        "       move    %1, $0                                  \n"     \
        "       move    %D1, $0                                 \n"     \
@@ -355,6 +367,7 @@ do {                                                                        \
        __asm__ __volatile__(                                           \
        "1:     " insn "        %z2, %3         # __put_user_asm\n"     \
        "2:                                                     \n"     \
+       "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "3:     li      %0, %4                                  \n"     \
        "       j       2b                                      \n"     \
@@ -373,6 +386,7 @@ do {                                                                        \
        "1:     sw      %2, (%3)        # __put_user_asm_ll32   \n"     \
        "2:     sw      %D2, 4(%3)                              \n"     \
        "3:                                                     \n"     \
+       "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "4:     li      %0, %4                                  \n"     \
        "       j       3b                                      \n"     \
@@ -524,6 +538,7 @@ do {                                                                        \
        __asm__ __volatile__(                                           \
        "1:     " insn "        %1, %3                          \n"     \
        "2:                                                     \n"     \
+       "       .insn                                           \n"     \
        "       .section .fixup,\"ax\"                          \n"     \
        "3:     li      %0, %4                                  \n"     \
        "       j       2b                                      \n"     \
@@ -549,7 +564,9 @@ do {                                                                        \
        "1:     ulw     %1, (%3)                                \n"     \
        "2:     ulw     %D1, 4(%3)                              \n"     \
        "       move    %0, $0                                  \n"     \
-       "3:     .section        .fixup,\"ax\"                   \n"     \
+       "3:                                                     \n"     \
+       "       .insn                                           \n"     \
+       "       .section        .fixup,\"ax\"                   \n"     \
        "4:     li      %0, %4                                  \n"     \
        "       move    %1, $0                                  \n"     \
        "       move    %D1, $0                                 \n"     \
@@ -616,6 +633,7 @@ do {                                                                        \
        __asm__ __volatile__(                                           \
        "1:     " insn "        %z2, %3         # __put_user_unaligned_asm\n" \
        "2:                                                     \n"     \
+       "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "3:     li      %0, %4                                  \n"     \
        "       j       2b                                      \n"     \
@@ -634,6 +652,7 @@ do {                                                                        \
        "1:     sw      %2, (%3)        # __put_user_unaligned_asm_ll32 \n" \
        "2:     sw      %D2, 4(%3)                              \n"     \
        "3:                                                     \n"     \
+       "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "4:     li      %0, %4                                  \n"     \
        "       j       3b                                      \n"     \
index 058e941626a6ab51e823fa352e8c1f63e1d06624..370d967725c28d0e59d26ad8726f2ad052d5d2fa 100644 (file)
@@ -6,7 +6,7 @@
  * Copyright (C) 2004, 2005, 2006, 2008         Thiemo Seufer
  * Copyright (C) 2005  Maciej W. Rozycki
  * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
- * Copyright (C) 2012  MIPS Technologies, Inc.
+ * Copyright (C) 2012, 2013  MIPS Technologies, Inc.  All rights reserved.
  */
 
 #include <linux/types.h>
 #define UASM_EXPORT_SYMBOL(sym)
 #endif
 
+#define _UASM_ISA_CLASSIC      0
+#define _UASM_ISA_MICROMIPS    1
+
+#ifndef UASM_ISA
+#ifdef CONFIG_CPU_MICROMIPS
+#define UASM_ISA       _UASM_ISA_MICROMIPS
+#else
+#define UASM_ISA       _UASM_ISA_CLASSIC
+#endif
+#endif
+
+#if (UASM_ISA == _UASM_ISA_CLASSIC)
+#ifdef CONFIG_CPU_MICROMIPS
+#define ISAOPC(op)     CL_uasm_i##op
+#define ISAFUNC(x)     CL_##x
+#else
+#define ISAOPC(op)     uasm_i##op
+#define ISAFUNC(x)     x
+#endif
+#elif (UASM_ISA == _UASM_ISA_MICROMIPS)
+#ifdef CONFIG_CPU_MICROMIPS
+#define ISAOPC(op)     uasm_i##op
+#define ISAFUNC(x)     x
+#else
+#define ISAOPC(op)     MM_uasm_i##op
+#define ISAFUNC(x)     MM_##x
+#endif
+#else
+#error Unsupported micro-assembler ISA!!!
+#endif
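The net effect of the conditionals above is that whichever ISA the kernel is built for owns the plain uasm_i_* names, while the other flavour is prefixed out of the way. Expansion examples, derived directly from the table:

	/*
	 * CONFIG_CPU_MICROMIPS=y:
	 *   classic source   (UASM_ISA == _UASM_ISA_CLASSIC):   ISAOPC(_addiu) -> CL_uasm_i_addiu
	 *   microMIPS source (UASM_ISA == _UASM_ISA_MICROMIPS): ISAOPC(_addiu) -> uasm_i_addiu
	 * CONFIG_CPU_MICROMIPS=n:
	 *   classic source:   ISAOPC(_addiu) -> uasm_i_addiu
	 *   microMIPS source: ISAOPC(_addiu) -> MM_uasm_i_addiu
	 */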
+
 #define Ip_u1u2u3(op)                                                  \
 void __uasminit                                                                \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
+ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
 
 #define Ip_u2u1u3(op)                                                  \
 void __uasminit                                                                \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
+ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
 
 #define Ip_u3u1u2(op)                                                  \
 void __uasminit                                                                \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
+ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
 
 #define Ip_u1u2s3(op)                                                  \
 void __uasminit                                                                \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)
+ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
 
 #define Ip_u2s3u1(op)                                                  \
 void __uasminit                                                                \
-uasm_i##op(u32 **buf, unsigned int a, signed int b, unsigned int c)
+ISAOPC(op)(u32 **buf, unsigned int a, signed int b, unsigned int c)
 
 #define Ip_u2u1s3(op)                                                  \
 void __uasminit                                                                \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)
+ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
 
 #define Ip_u2u1msbu3(op)                                               \
 void __uasminit                                                                \
-uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c,  \
+ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c,  \
           unsigned int d)
 
 #define Ip_u1u2(op)                                                    \
-void __uasminit uasm_i##op(u32 **buf, unsigned int a, unsigned int b)
+void __uasminit ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b)
 
 #define Ip_u1s2(op)                                                    \
-void __uasminit uasm_i##op(u32 **buf, unsigned int a, signed int b)
+void __uasminit ISAOPC(op)(u32 **buf, unsigned int a, signed int b)
 
-#define Ip_u1(op) void __uasminit uasm_i##op(u32 **buf, unsigned int a)
+#define Ip_u1(op) void __uasminit ISAOPC(op)(u32 **buf, unsigned int a)
 
-#define Ip_0(op) void __uasminit uasm_i##op(u32 **buf)
+#define Ip_0(op) void __uasminit ISAOPC(op)(u32 **buf)
 
 Ip_u2u1s3(_addiu);
 Ip_u3u1u2(_addu);
@@ -132,19 +163,20 @@ struct uasm_label {
        int lab;
 };
 
-void __uasminit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid);
+void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr,
+                       int lid);
 #ifdef CONFIG_64BIT
-int uasm_in_compat_space_p(long addr);
+int ISAFUNC(uasm_in_compat_space_p)(long addr);
 #endif
-int uasm_rel_hi(long val);
-int uasm_rel_lo(long val);
-void UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr);
-void UASM_i_LA(u32 **buf, unsigned int rs, long addr);
+int ISAFUNC(uasm_rel_hi)(long val);
+int ISAFUNC(uasm_rel_lo)(long val);
+void ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr);
+void ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr);
 
 #define UASM_L_LA(lb)                                                  \
-static inline void __uasminit uasm_l##lb(struct uasm_label **lab, u32 *addr) \
+static inline void __uasminit ISAFUNC(uasm_l##lb)(struct uasm_label **lab, u32 *addr) \
 {                                                                      \
-       uasm_build_label(lab, addr, label##lb);                         \
+       ISAFUNC(uasm_build_label)(lab, addr, label##lb);                \
 }
 
 /* convenience macros for instructions */
@@ -196,27 +228,27 @@ static inline void uasm_i_drotr_safe(u32 **p, unsigned int a1,
                                     unsigned int a2, unsigned int a3)
 {
        if (a3 < 32)
-               uasm_i_drotr(p, a1, a2, a3);
+               ISAOPC(_drotr)(p, a1, a2, a3);
        else
-               uasm_i_drotr32(p, a1, a2, a3 - 32);
+               ISAOPC(_drotr32)(p, a1, a2, a3 - 32);
 }
 
 static inline void uasm_i_dsll_safe(u32 **p, unsigned int a1,
                                    unsigned int a2, unsigned int a3)
 {
        if (a3 < 32)
-               uasm_i_dsll(p, a1, a2, a3);
+               ISAOPC(_dsll)(p, a1, a2, a3);
        else
-               uasm_i_dsll32(p, a1, a2, a3 - 32);
+               ISAOPC(_dsll32)(p, a1, a2, a3 - 32);
 }
 
 static inline void uasm_i_dsrl_safe(u32 **p, unsigned int a1,
                                    unsigned int a2, unsigned int a3)
 {
        if (a3 < 32)
-               uasm_i_dsrl(p, a1, a2, a3);
+               ISAOPC(_dsrl)(p, a1, a2, a3);
        else
-               uasm_i_dsrl32(p, a1, a2, a3 - 32);
+               ISAOPC(_dsrl32)(p, a1, a2, a3 - 32);
 }
 
 /* Handle relocations. */
index 4d078815eaa5448d933ecce88e1ada2503f629ce..0f4aec2ad1e6e1c9f7d8b7a1b54cf49ebb58c392 100644 (file)
@@ -7,6 +7,7 @@
  *
  * Copyright (C) 1996, 2000 by Ralf Baechle
  * Copyright (C) 2006 by Thiemo Seufer
+ * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
  */
 #ifndef _UAPI_ASM_INST_H
 #define _UAPI_ASM_INST_H
@@ -192,6 +193,282 @@ enum lx_func {
        lbx_op  = 0x16,
 };
 
+/*
+ * (microMIPS) Major opcodes.
+ */
+enum mm_major_op {
+       mm_pool32a_op, mm_pool16a_op, mm_lbu16_op, mm_move16_op,
+       mm_addi32_op, mm_lbu32_op, mm_sb32_op, mm_lb32_op,
+       mm_pool32b_op, mm_pool16b_op, mm_lhu16_op, mm_andi16_op,
+       mm_addiu32_op, mm_lhu32_op, mm_sh32_op, mm_lh32_op,
+       mm_pool32i_op, mm_pool16c_op, mm_lwsp16_op, mm_pool16d_op,
+       mm_ori32_op, mm_pool32f_op, mm_reserved1_op, mm_reserved2_op,
+       mm_pool32c_op, mm_lwgp16_op, mm_lw16_op, mm_pool16e_op,
+       mm_xori32_op, mm_jals32_op, mm_addiupc_op, mm_reserved3_op,
+       mm_reserved4_op, mm_pool16f_op, mm_sb16_op, mm_beqz16_op,
+       mm_slti32_op, mm_beq32_op, mm_swc132_op, mm_lwc132_op,
+       mm_reserved5_op, mm_reserved6_op, mm_sh16_op, mm_bnez16_op,
+       mm_sltiu32_op, mm_bne32_op, mm_sdc132_op, mm_ldc132_op,
+       mm_reserved7_op, mm_reserved8_op, mm_swsp16_op, mm_b16_op,
+       mm_andi32_op, mm_j32_op, mm_sd32_op, mm_ld32_op,
+       mm_reserved11_op, mm_reserved12_op, mm_sw16_op, mm_li16_op,
+       mm_jalx32_op, mm_jal32_op, mm_sw32_op, mm_lw32_op,
+};
+
+/*
+ * (microMIPS) POOL32I minor opcodes.
+ */
+enum mm_32i_minor_op {
+       mm_bltz_op, mm_bltzal_op, mm_bgez_op, mm_bgezal_op,
+       mm_blez_op, mm_bnezc_op, mm_bgtz_op, mm_beqzc_op,
+       mm_tlti_op, mm_tgei_op, mm_tltiu_op, mm_tgeiu_op,
+       mm_tnei_op, mm_lui_op, mm_teqi_op, mm_reserved13_op,
+       mm_synci_op, mm_bltzals_op, mm_reserved14_op, mm_bgezals_op,
+       mm_bc2f_op, mm_bc2t_op, mm_reserved15_op, mm_reserved16_op,
+       mm_reserved17_op, mm_reserved18_op, mm_bposge64_op, mm_bposge32_op,
+       mm_bc1f_op, mm_bc1t_op, mm_reserved19_op, mm_reserved20_op,
+       mm_bc1any2f_op, mm_bc1any2t_op, mm_bc1any4f_op, mm_bc1any4t_op,
+};
+
+/*
+ * (microMIPS) POOL32A minor opcodes.
+ */
+enum mm_32a_minor_op {
+       mm_sll32_op = 0x000,
+       mm_ins_op = 0x00c,
+       mm_ext_op = 0x02c,
+       mm_pool32axf_op = 0x03c,
+       mm_srl32_op = 0x040,
+       mm_sra_op = 0x080,
+       mm_rotr_op = 0x0c0,
+       mm_lwxs_op = 0x118,
+       mm_addu32_op = 0x150,
+       mm_subu32_op = 0x1d0,
+       mm_and_op = 0x250,
+       mm_or32_op = 0x290,
+       mm_xor32_op = 0x310,
+};
+
+/*
+ * (microMIPS) POOL32B functions.
+ */
+enum mm_32b_func {
+       mm_lwc2_func = 0x0,
+       mm_lwp_func = 0x1,
+       mm_ldc2_func = 0x2,
+       mm_ldp_func = 0x4,
+       mm_lwm32_func = 0x5,
+       mm_cache_func = 0x6,
+       mm_ldm_func = 0x7,
+       mm_swc2_func = 0x8,
+       mm_swp_func = 0x9,
+       mm_sdc2_func = 0xa,
+       mm_sdp_func = 0xc,
+       mm_swm32_func = 0xd,
+       mm_sdm_func = 0xf,
+};
+
+/*
+ * (microMIPS) POOL32C functions.
+ */
+enum mm_32c_func {
+       mm_pref_func = 0x2,
+       mm_ll_func = 0x3,
+       mm_swr_func = 0x9,
+       mm_sc_func = 0xb,
+       mm_lwu_func = 0xe,
+};
+
+/*
+ * (microMIPS) POOL32AXF minor opcodes.
+ */
+enum mm_32axf_minor_op {
+       mm_mfc0_op = 0x003,
+       mm_mtc0_op = 0x00b,
+       mm_tlbp_op = 0x00d,
+       mm_jalr_op = 0x03c,
+       mm_tlbr_op = 0x04d,
+       mm_jalrhb_op = 0x07c,
+       mm_tlbwi_op = 0x08d,
+       mm_tlbwr_op = 0x0cd,
+       mm_jalrs_op = 0x13c,
+       mm_jalrshb_op = 0x17c,
+       mm_syscall_op = 0x22d,
+       mm_eret_op = 0x3cd,
+};
+
+/*
+ * (microMIPS) POOL32F minor opcodes.
+ */
+enum mm_32f_minor_op {
+       mm_32f_00_op = 0x00,
+       mm_32f_01_op = 0x01,
+       mm_32f_02_op = 0x02,
+       mm_32f_10_op = 0x08,
+       mm_32f_11_op = 0x09,
+       mm_32f_12_op = 0x0a,
+       mm_32f_20_op = 0x10,
+       mm_32f_30_op = 0x18,
+       mm_32f_40_op = 0x20,
+       mm_32f_41_op = 0x21,
+       mm_32f_42_op = 0x22,
+       mm_32f_50_op = 0x28,
+       mm_32f_51_op = 0x29,
+       mm_32f_52_op = 0x2a,
+       mm_32f_60_op = 0x30,
+       mm_32f_70_op = 0x38,
+       mm_32f_73_op = 0x3b,
+       mm_32f_74_op = 0x3c,
+};
+
+/*
+ * (microMIPS) POOL32F secondary minor opcodes.
+ */
+enum mm_32f_10_minor_op {
+       mm_lwxc1_op = 0x1,
+       mm_swxc1_op,
+       mm_ldxc1_op,
+       mm_sdxc1_op,
+       mm_luxc1_op,
+       mm_suxc1_op,
+};
+
+enum mm_32f_func {
+       mm_lwxc1_func = 0x048,
+       mm_swxc1_func = 0x088,
+       mm_ldxc1_func = 0x0c8,
+       mm_sdxc1_func = 0x108,
+};
+
+/*
+ * (microMIPS) POOL32F secondary minor opcodes.
+ */
+enum mm_32f_40_minor_op {
+       mm_fmovf_op,
+       mm_fmovt_op,
+};
+
+/*
+ * (microMIPS) POOL32F secondary minor opcodes.
+ */
+enum mm_32f_60_minor_op {
+       mm_fadd_op,
+       mm_fsub_op,
+       mm_fmul_op,
+       mm_fdiv_op,
+};
+
+/*
+ * (microMIPS) POOL32F secondary minor opcodes.
+ */
+enum mm_32f_70_minor_op {
+       mm_fmovn_op,
+       mm_fmovz_op,
+};
+
+/*
+ * (microMIPS) POOL32FXF secondary minor opcodes for POOL32F.
+ */
+enum mm_32f_73_minor_op {
+       mm_fmov0_op = 0x01,
+       mm_fcvtl_op = 0x04,
+       mm_movf0_op = 0x05,
+       mm_frsqrt_op = 0x08,
+       mm_ffloorl_op = 0x0c,
+       mm_fabs0_op = 0x0d,
+       mm_fcvtw_op = 0x24,
+       mm_movt0_op = 0x25,
+       mm_fsqrt_op = 0x28,
+       mm_ffloorw_op = 0x2c,
+       mm_fneg0_op = 0x2d,
+       mm_cfc1_op = 0x40,
+       mm_frecip_op = 0x48,
+       mm_fceill_op = 0x4c,
+       mm_fcvtd0_op = 0x4d,
+       mm_ctc1_op = 0x60,
+       mm_fceilw_op = 0x6c,
+       mm_fcvts0_op = 0x6d,
+       mm_mfc1_op = 0x80,
+       mm_fmov1_op = 0x81,
+       mm_movf1_op = 0x85,
+       mm_ftruncl_op = 0x8c,
+       mm_fabs1_op = 0x8d,
+       mm_mtc1_op = 0xa0,
+       mm_movt1_op = 0xa5,
+       mm_ftruncw_op = 0xac,
+       mm_fneg1_op = 0xad,
+       mm_froundl_op = 0xcc,
+       mm_fcvtd1_op = 0xcd,
+       mm_froundw_op = 0xec,
+       mm_fcvts1_op = 0xed,
+};
+
+/*
+ * (microMIPS) POOL16C minor opcodes.
+ */
+enum mm_16c_minor_op {
+       mm_lwm16_op = 0x04,
+       mm_swm16_op = 0x05,
+       mm_jr16_op = 0x18,
+       mm_jrc_op = 0x1a,
+       mm_jalr16_op = 0x1c,
+       mm_jalrs16_op = 0x1e,
+};
+
+/*
+ * (microMIPS) POOL16D minor opcodes.
+ */
+enum mm_16d_minor_op {
+       mm_addius5_func,
+       mm_addiusp_func,
+};
+
+/*
+ * (MIPS16e) opcodes.
+ */
+enum MIPS16e_ops {
+       MIPS16e_jal_op = 003,
+       MIPS16e_ld_op = 007,
+       MIPS16e_i8_op = 014,
+       MIPS16e_sd_op = 017,
+       MIPS16e_lb_op = 020,
+       MIPS16e_lh_op = 021,
+       MIPS16e_lwsp_op = 022,
+       MIPS16e_lw_op = 023,
+       MIPS16e_lbu_op = 024,
+       MIPS16e_lhu_op = 025,
+       MIPS16e_lwpc_op = 026,
+       MIPS16e_lwu_op = 027,
+       MIPS16e_sb_op = 030,
+       MIPS16e_sh_op = 031,
+       MIPS16e_swsp_op = 032,
+       MIPS16e_sw_op = 033,
+       MIPS16e_rr_op = 035,
+       MIPS16e_extend_op = 036,
+       MIPS16e_i64_op = 037,
+};
+
+enum MIPS16e_i64_func {
+       MIPS16e_ldsp_func,
+       MIPS16e_sdsp_func,
+       MIPS16e_sdrasp_func,
+       MIPS16e_dadjsp_func,
+       MIPS16e_ldpc_func,
+};
+
+enum MIPS16e_rr_func {
+       MIPS16e_jr_func,
+};
+
+enum MIPS6e_i8_func {
+       MIPS16e_swrasp_func = 02,
+};
+
+/*
+ * (microMIPS & MIPS16e) NOP instruction.
+ */
+#define MM_NOP16       0x0c00
+
 /*
  * Damn ...  bitfields depend from byteorder :-(
  */
@@ -311,6 +588,262 @@ struct v_format {                         /* MDMX vector format */
        ;)))))))
 };
 
+/*
+ * microMIPS instruction formats (32-bit length)
+ *
+ * NOTE:
+ *     Parentheses denote whether the format is a microMIPS instruction or
+ *     a MIPS32 instruction re-encoded for use in the microMIPS ASE.
+ */
+struct fb_format {             /* FPU branch format (MIPS32) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int bc : 5,
+       BITFIELD_FIELD(unsigned int cc : 3,
+       BITFIELD_FIELD(unsigned int flag : 2,
+       BITFIELD_FIELD(signed int simmediate : 16,
+       ;)))))
+};
+
+struct fp0_format {            /* FPU multiply and add format (MIPS32) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int fmt : 5,
+       BITFIELD_FIELD(unsigned int ft : 5,
+       BITFIELD_FIELD(unsigned int fs : 5,
+       BITFIELD_FIELD(unsigned int fd : 5,
+       BITFIELD_FIELD(unsigned int func : 6,
+       ;))))))
+};
+
+struct mm_fp0_format {         /* FPU multiply and add format (microMIPS) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int ft : 5,
+       BITFIELD_FIELD(unsigned int fs : 5,
+       BITFIELD_FIELD(unsigned int fd : 5,
+       BITFIELD_FIELD(unsigned int fmt : 3,
+       BITFIELD_FIELD(unsigned int op : 2,
+       BITFIELD_FIELD(unsigned int func : 6,
+       ;)))))))
+};
+
+struct fp1_format {            /* FPU mfc1 and cfc1 format (MIPS32) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int op : 5,
+       BITFIELD_FIELD(unsigned int rt : 5,
+       BITFIELD_FIELD(unsigned int fs : 5,
+       BITFIELD_FIELD(unsigned int fd : 5,
+       BITFIELD_FIELD(unsigned int func : 6,
+       ;))))))
+};
+
+struct mm_fp1_format {         /* FPU mfc1 and cfc1 format (microMIPS) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int rt : 5,
+       BITFIELD_FIELD(unsigned int fs : 5,
+       BITFIELD_FIELD(unsigned int fmt : 2,
+       BITFIELD_FIELD(unsigned int op : 8,
+       BITFIELD_FIELD(unsigned int func : 6,
+       ;))))))
+};
+
+struct mm_fp2_format {         /* FPU movt and movf format (microMIPS) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int fd : 5,
+       BITFIELD_FIELD(unsigned int fs : 5,
+       BITFIELD_FIELD(unsigned int cc : 3,
+       BITFIELD_FIELD(unsigned int zero : 2,
+       BITFIELD_FIELD(unsigned int fmt : 2,
+       BITFIELD_FIELD(unsigned int op : 3,
+       BITFIELD_FIELD(unsigned int func : 6,
+       ;))))))))
+};
+
+struct mm_fp3_format {         /* FPU abs and neg format (microMIPS) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int rt : 5,
+       BITFIELD_FIELD(unsigned int fs : 5,
+       BITFIELD_FIELD(unsigned int fmt : 3,
+       BITFIELD_FIELD(unsigned int op : 7,
+       BITFIELD_FIELD(unsigned int func : 6,
+       ;))))))
+};
+
+struct mm_fp4_format {         /* FPU c.cond format (microMIPS) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int rt : 5,
+       BITFIELD_FIELD(unsigned int fs : 5,
+       BITFIELD_FIELD(unsigned int cc : 3,
+       BITFIELD_FIELD(unsigned int fmt : 3,
+       BITFIELD_FIELD(unsigned int cond : 4,
+       BITFIELD_FIELD(unsigned int func : 6,
+       ;)))))))
+};
+
+struct mm_fp5_format {         /* FPU lwxc1 and swxc1 format (microMIPS) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int index : 5,
+       BITFIELD_FIELD(unsigned int base : 5,
+       BITFIELD_FIELD(unsigned int fd : 5,
+       BITFIELD_FIELD(unsigned int op : 5,
+       BITFIELD_FIELD(unsigned int func : 6,
+       ;))))))
+};
+
+struct fp6_format {            /* FPU madd and msub format (MIPS IV) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int fr : 5,
+       BITFIELD_FIELD(unsigned int ft : 5,
+       BITFIELD_FIELD(unsigned int fs : 5,
+       BITFIELD_FIELD(unsigned int fd : 5,
+       BITFIELD_FIELD(unsigned int func : 6,
+       ;))))))
+};
+
+struct mm_fp6_format {         /* FPU madd and msub format (microMIPS) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int ft : 5,
+       BITFIELD_FIELD(unsigned int fs : 5,
+       BITFIELD_FIELD(unsigned int fd : 5,
+       BITFIELD_FIELD(unsigned int fr : 5,
+       BITFIELD_FIELD(unsigned int func : 6,
+       ;))))))
+};
+
+struct mm_i_format {           /* Immediate format (microMIPS) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int rt : 5,
+       BITFIELD_FIELD(unsigned int rs : 5,
+       BITFIELD_FIELD(signed int simmediate : 16,
+       ;))))
+};
+
+struct mm_m_format {           /* Multi-word load/store format (microMIPS) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int rd : 5,
+       BITFIELD_FIELD(unsigned int base : 5,
+       BITFIELD_FIELD(unsigned int func : 4,
+       BITFIELD_FIELD(signed int simmediate : 12,
+       ;)))))
+};
+
+struct mm_x_format {           /* Scaled indexed load format (microMIPS) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int index : 5,
+       BITFIELD_FIELD(unsigned int base : 5,
+       BITFIELD_FIELD(unsigned int rd : 5,
+       BITFIELD_FIELD(unsigned int func : 11,
+       ;)))))
+};
+
+/*
+ * microMIPS instruction formats (16-bit length)
+ */
+struct mm_b0_format {          /* Unconditional branch format (microMIPS) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(signed int simmediate : 10,
+       BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+       ;)))
+};
+
+struct mm_b1_format {          /* Conditional branch format (microMIPS) */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int rs : 3,
+       BITFIELD_FIELD(signed int simmediate : 7,
+       BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+       ;))))
+};
+
+struct mm16_m_format {         /* Multi-word load/store format */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int func : 4,
+       BITFIELD_FIELD(unsigned int rlist : 2,
+       BITFIELD_FIELD(unsigned int imm : 4,
+       BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+       ;)))))
+};
+
+struct mm16_rb_format {                /* Signed immediate format */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int rt : 3,
+       BITFIELD_FIELD(unsigned int base : 3,
+       BITFIELD_FIELD(signed int simmediate : 4,
+       BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+       ;)))))
+};
+
+struct mm16_r3_format {                /* Load from global pointer format */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int rt : 3,
+       BITFIELD_FIELD(signed int simmediate : 7,
+       BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+       ;))))
+};
+
+struct mm16_r5_format {                /* Load/store from stack pointer format */
+       BITFIELD_FIELD(unsigned int opcode : 6,
+       BITFIELD_FIELD(unsigned int rt : 5,
+       BITFIELD_FIELD(signed int simmediate : 5,
+       BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+       ;))))
+};
+
+/*
+ * MIPS16e instruction formats (16-bit length)
+ */
+struct m16e_rr {
+       BITFIELD_FIELD(unsigned int opcode : 5,
+       BITFIELD_FIELD(unsigned int rx : 3,
+       BITFIELD_FIELD(unsigned int nd : 1,
+       BITFIELD_FIELD(unsigned int l : 1,
+       BITFIELD_FIELD(unsigned int ra : 1,
+       BITFIELD_FIELD(unsigned int func : 5,
+       ;))))))
+};
+
+struct m16e_jal {
+       BITFIELD_FIELD(unsigned int opcode : 5,
+       BITFIELD_FIELD(unsigned int x : 1,
+       BITFIELD_FIELD(unsigned int imm20_16 : 5,
+       BITFIELD_FIELD(signed int imm25_21 : 5,
+       ;))))
+};
+
+struct m16e_i64 {
+       BITFIELD_FIELD(unsigned int opcode : 5,
+       BITFIELD_FIELD(unsigned int func : 3,
+       BITFIELD_FIELD(unsigned int imm : 8,
+       ;)))
+};
+
+struct m16e_ri64 {
+       BITFIELD_FIELD(unsigned int opcode : 5,
+       BITFIELD_FIELD(unsigned int func : 3,
+       BITFIELD_FIELD(unsigned int ry : 3,
+       BITFIELD_FIELD(unsigned int imm : 5,
+       ;))))
+};
+
+struct m16e_ri {
+       BITFIELD_FIELD(unsigned int opcode : 5,
+       BITFIELD_FIELD(unsigned int rx : 3,
+       BITFIELD_FIELD(unsigned int imm : 8,
+       ;)))
+};
+
+struct m16e_rri {
+       BITFIELD_FIELD(unsigned int opcode : 5,
+       BITFIELD_FIELD(unsigned int rx : 3,
+       BITFIELD_FIELD(unsigned int ry : 3,
+       BITFIELD_FIELD(unsigned int imm : 5,
+       ;))))
+};
+
+struct m16e_i8 {
+       BITFIELD_FIELD(unsigned int opcode : 5,
+       BITFIELD_FIELD(unsigned int func : 3,
+       BITFIELD_FIELD(unsigned int imm : 8,
+       ;)))
+};
+
 union mips_instruction {
        unsigned int word;
        unsigned short halfword[2];
@@ -326,6 +859,37 @@ union mips_instruction {
        struct b_format b_format;
        struct ps_format ps_format;
        struct v_format v_format;
+       struct fb_format fb_format;
+       struct fp0_format fp0_format;
+       struct mm_fp0_format mm_fp0_format;
+       struct fp1_format fp1_format;
+       struct mm_fp1_format mm_fp1_format;
+       struct mm_fp2_format mm_fp2_format;
+       struct mm_fp3_format mm_fp3_format;
+       struct mm_fp4_format mm_fp4_format;
+       struct mm_fp5_format mm_fp5_format;
+       struct fp6_format fp6_format;
+       struct mm_fp6_format mm_fp6_format;
+       struct mm_i_format mm_i_format;
+       struct mm_m_format mm_m_format;
+       struct mm_x_format mm_x_format;
+       struct mm_b0_format mm_b0_format;
+       struct mm_b1_format mm_b1_format;
+       struct mm16_m_format mm16_m_format;
+       struct mm16_rb_format mm16_rb_format;
+       struct mm16_r3_format mm16_r3_format;
+       struct mm16_r5_format mm16_r5_format;
+};
+
+union mips16e_instruction {
+       unsigned int full : 16;
+       struct m16e_rr rr;
+       struct m16e_jal jal;
+       struct m16e_i64 i64;
+       struct m16e_ri64 ri64;
+       struct m16e_ri ri;
+       struct m16e_rri rri;
+       struct m16e_i8 i8;
 };
 
 #endif /* _UAPI_ASM_INST_H */
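Editor's note: a hedged illustration of how the layouts above are meant to be consumed (not part of the patch). MIPS16e_jal_op comes from the opcode enums in the same header, and BITFIELD_FIELD takes care of the endian-dependent field order, so the same field names work on both big- and little-endian builds.

        #include <asm/inst.h>

        /* Sketch only: classify a MIPS16e halfword using the unions above. */
        static int is_mips16e_jal(unsigned short halfword)
        {
                union mips16e_instruction insn;

                insn.full = halfword;                   /* 16-bit encoding */
                return insn.ri.opcode == MIPS16e_jal_op; /* opcode field, bits 15..11 */
        }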
index 520a908d45d62af6d3edb1ef83b2f0f990faa562..6ad9e04bdf6210a8b722e92aca5e49161cb4deca 100644 (file)
@@ -5,7 +5,7 @@
 extra-y                := head.o vmlinux.lds
 
 obj-y          += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
-                  ptrace.o reset.o setup.o signal.o syscall.o \
+                  prom.o ptrace.o reset.o setup.o signal.o syscall.o \
                   time.o topology.o traps.o unaligned.o watch.o vdso.o
 
 ifdef CONFIG_FUNCTION_TRACER
@@ -19,15 +19,16 @@ obj-$(CONFIG_CEVT_BCM1480)  += cevt-bcm1480.o
 obj-$(CONFIG_CEVT_R4K)         += cevt-r4k.o
 obj-$(CONFIG_MIPS_MT_SMTC)     += cevt-smtc.o
 obj-$(CONFIG_CEVT_DS1287)      += cevt-ds1287.o
+obj-$(CONFIG_CEVT_GIC)         += cevt-gic.o
 obj-$(CONFIG_CEVT_GT641XX)     += cevt-gt641xx.o
 obj-$(CONFIG_CEVT_SB1250)      += cevt-sb1250.o
 obj-$(CONFIG_CEVT_TXX9)                += cevt-txx9.o
 obj-$(CONFIG_CSRC_BCM1480)     += csrc-bcm1480.o
+obj-$(CONFIG_CSRC_GIC)         += csrc-gic.o
 obj-$(CONFIG_CSRC_IOASIC)      += csrc-ioasic.o
 obj-$(CONFIG_CSRC_POWERTV)     += csrc-powertv.o
 obj-$(CONFIG_CSRC_R4K)         += csrc-r4k.o
 obj-$(CONFIG_CSRC_SB1250)      += csrc-sb1250.o
-obj-$(CONFIG_CSRC_GIC)         += csrc-gic.o
 obj-$(CONFIG_SYNC_R4K)         += sync-r4k.o
 
 obj-$(CONFIG_STACKTRACE)       += stacktrace.o
@@ -86,8 +87,6 @@ obj-$(CONFIG_EARLY_PRINTK)    += early_printk.o
 obj-$(CONFIG_SPINLOCK_TEST)    += spinlock_test.o
 obj-$(CONFIG_MIPS_MACHINE)     += mips_machine.o
 
-obj-$(CONFIG_OF)               += prom.o
-
 CFLAGS_cpu-bugs64.o    = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
 
 obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT)  += 8250-platform.o
index 50285b2c7ffe30d5a54d877f7f0cac075dd59f8c..0845091ba480bba5313e32f7e12c2a0b79b83048 100644 (file)
@@ -17,6 +17,8 @@
 #include <asm/ptrace.h>
 #include <asm/processor.h>
 
+#include <linux/kvm_host.h>
+
 void output_ptreg_defines(void)
 {
        COMMENT("MIPS pt_regs offsets.");
@@ -328,3 +330,67 @@ void output_pbe_defines(void)
        BLANK();
 }
 #endif
+
+void output_kvm_defines(void)
+{
+       COMMENT(" KVM/MIPS Specific offsets. ");
+       DEFINE(VCPU_ARCH_SIZE, sizeof(struct kvm_vcpu_arch));
+       OFFSET(VCPU_RUN, kvm_vcpu, run);
+       OFFSET(VCPU_HOST_ARCH, kvm_vcpu, arch);
+
+       OFFSET(VCPU_HOST_EBASE, kvm_vcpu_arch, host_ebase);
+       OFFSET(VCPU_GUEST_EBASE, kvm_vcpu_arch, guest_ebase);
+
+       OFFSET(VCPU_HOST_STACK, kvm_vcpu_arch, host_stack);
+       OFFSET(VCPU_HOST_GP, kvm_vcpu_arch, host_gp);
+
+       OFFSET(VCPU_HOST_CP0_BADVADDR, kvm_vcpu_arch, host_cp0_badvaddr);
+       OFFSET(VCPU_HOST_CP0_CAUSE, kvm_vcpu_arch, host_cp0_cause);
+       OFFSET(VCPU_HOST_EPC, kvm_vcpu_arch, host_cp0_epc);
+       OFFSET(VCPU_HOST_ENTRYHI, kvm_vcpu_arch, host_cp0_entryhi);
+
+       OFFSET(VCPU_GUEST_INST, kvm_vcpu_arch, guest_inst);
+
+       OFFSET(VCPU_R0, kvm_vcpu_arch, gprs[0]);
+       OFFSET(VCPU_R1, kvm_vcpu_arch, gprs[1]);
+       OFFSET(VCPU_R2, kvm_vcpu_arch, gprs[2]);
+       OFFSET(VCPU_R3, kvm_vcpu_arch, gprs[3]);
+       OFFSET(VCPU_R4, kvm_vcpu_arch, gprs[4]);
+       OFFSET(VCPU_R5, kvm_vcpu_arch, gprs[5]);
+       OFFSET(VCPU_R6, kvm_vcpu_arch, gprs[6]);
+       OFFSET(VCPU_R7, kvm_vcpu_arch, gprs[7]);
+       OFFSET(VCPU_R8, kvm_vcpu_arch, gprs[8]);
+       OFFSET(VCPU_R9, kvm_vcpu_arch, gprs[9]);
+       OFFSET(VCPU_R10, kvm_vcpu_arch, gprs[10]);
+       OFFSET(VCPU_R11, kvm_vcpu_arch, gprs[11]);
+       OFFSET(VCPU_R12, kvm_vcpu_arch, gprs[12]);
+       OFFSET(VCPU_R13, kvm_vcpu_arch, gprs[13]);
+       OFFSET(VCPU_R14, kvm_vcpu_arch, gprs[14]);
+       OFFSET(VCPU_R15, kvm_vcpu_arch, gprs[15]);
+       OFFSET(VCPU_R16, kvm_vcpu_arch, gprs[16]);
+       OFFSET(VCPU_R17, kvm_vcpu_arch, gprs[17]);
+       OFFSET(VCPU_R18, kvm_vcpu_arch, gprs[18]);
+       OFFSET(VCPU_R19, kvm_vcpu_arch, gprs[19]);
+       OFFSET(VCPU_R20, kvm_vcpu_arch, gprs[20]);
+       OFFSET(VCPU_R21, kvm_vcpu_arch, gprs[21]);
+       OFFSET(VCPU_R22, kvm_vcpu_arch, gprs[22]);
+       OFFSET(VCPU_R23, kvm_vcpu_arch, gprs[23]);
+       OFFSET(VCPU_R24, kvm_vcpu_arch, gprs[24]);
+       OFFSET(VCPU_R25, kvm_vcpu_arch, gprs[25]);
+       OFFSET(VCPU_R26, kvm_vcpu_arch, gprs[26]);
+       OFFSET(VCPU_R27, kvm_vcpu_arch, gprs[27]);
+       OFFSET(VCPU_R28, kvm_vcpu_arch, gprs[28]);
+       OFFSET(VCPU_R29, kvm_vcpu_arch, gprs[29]);
+       OFFSET(VCPU_R30, kvm_vcpu_arch, gprs[30]);
+       OFFSET(VCPU_R31, kvm_vcpu_arch, gprs[31]);
+       OFFSET(VCPU_LO, kvm_vcpu_arch, lo);
+       OFFSET(VCPU_HI, kvm_vcpu_arch, hi);
+       OFFSET(VCPU_PC, kvm_vcpu_arch, pc);
+       OFFSET(VCPU_COP0, kvm_vcpu_arch, cop0);
+       OFFSET(VCPU_GUEST_KERNEL_ASID, kvm_vcpu_arch, guest_kernel_asid);
+       OFFSET(VCPU_GUEST_USER_ASID, kvm_vcpu_arch, guest_user_asid);
+
+       OFFSET(COP0_TLB_HI, mips_coproc, reg[MIPS_CP0_TLB_HI][0]);
+       OFFSET(COP0_STATUS, mips_coproc, reg[MIPS_CP0_STATUS][0]);
+       BLANK();
+}
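Editor's note, for readers unfamiliar with asm-offsets.c: each OFFSET() entry above reduces to an offsetof() constant that the KVM world-switch assembly can then use as a literal displacement. A self-contained illustration with a hypothetical structure (the real one is struct kvm_vcpu_arch):

        #include <stddef.h>

        /* Hypothetical structure standing in for struct kvm_vcpu_arch. */
        struct example_vcpu_arch {
                unsigned long pc;
                unsigned long gprs[32];
        };

        /* Roughly what OFFSET(VCPU_PC, kvm_vcpu_arch, pc) publishes to asm. */
        enum { EXAMPLE_VCPU_PC = offsetof(struct example_vcpu_arch, pc) };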
index 556a4357d7fc69f79b024fd968ef409bcbe20fb8..97c5a1668e5347bb4a7668882931349cfdac08a6 100644 (file)
@@ -48,7 +48,11 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
        __res;                                                          \
 })
 
+#ifdef CONFIG_KVM_GUEST
+#define TASK32_SIZE            0x3fff8000UL
+#else
 #define TASK32_SIZE            0x7fff8000UL
+#endif
 #undef ELF_ET_DYN_BASE
 #define ELF_ET_DYN_BASE                (TASK32_SIZE / 3 * 2)
 
index 83ffe950f710f86c985d56e9febabb0aafe39d75..46c2ad0703a0b1040140b4a2041c179fdaa45b34 100644 (file)
 #include <asm/cpu.h>
 #include <asm/cpu-features.h>
 #include <asm/fpu.h>
+#include <asm/fpu_emulator.h>
 #include <asm/inst.h>
 #include <asm/ptrace.h>
 #include <asm/uaccess.h>
 
+/*
+ * Calculate and return exception PC in case of branch delay slot
+ * for microMIPS and MIPS16e. It does not clear the ISA mode bit.
+ */
+int __isa_exception_epc(struct pt_regs *regs)
+{
+       unsigned short inst;
+       long epc = regs->cp0_epc;
+
+       /* Calculate exception PC in branch delay slot. */
+       if (__get_user(inst, (u16 __user *) msk_isa16_mode(epc))) {
+               /* This should never happen because delay slot was checked. */
+               force_sig(SIGSEGV, current);
+               return epc;
+       }
+       if (cpu_has_mips16) {
+               if (((union mips16e_instruction)inst).ri.opcode
+                               == MIPS16e_jal_op)
+                       epc += 4;
+               else
+                       epc += 2;
+       } else if (mm_insn_16bit(inst))
+               epc += 2;
+       else
+               epc += 4;
+
+       return epc;
+}
+
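Editor's note: msk_isa16_mode(), set_isa16_mode() and get_isa16_mode() are used throughout this series. Roughly, they manipulate bit 0 of an address, which on microMIPS/MIPS16e CPUs carries the ISA mode rather than being part of the byte address. A sketch with example_ prefixes, since the real macros are defined elsewhere in the series and may differ in form:

        /* Sketch only: bit 0 of EPC/jump targets selects the compressed ISA. */
        #define example_msk_isa16_mode(addr)    ((addr) & ~1UL) /* strip mode bit   */
        #define example_set_isa16_mode(addr)    ((addr) | 1UL)  /* mark 16-bit ISA  */
        #define example_get_isa16_mode(addr)    ((addr) & 1UL)  /* query mode bit   */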
+/*
+ * Compute return address and emulate branch in microMIPS mode after an
+ * exception only. It does not handle compact branches/jumps and cannot
+ * be used in interrupt context. (Compact branches/jumps do not cause
+ * exceptions.)
+ */
+int __microMIPS_compute_return_epc(struct pt_regs *regs)
+{
+       u16 __user *pc16;
+       u16 halfword;
+       unsigned int word;
+       unsigned long contpc;
+       struct mm_decoded_insn mminsn = { 0 };
+
+       mminsn.micro_mips_mode = 1;
+
+       /* This load never faults. */
+       pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
+       __get_user(halfword, pc16);
+       pc16++;
+       contpc = regs->cp0_epc + 2;
+       word = ((unsigned int)halfword << 16);
+       mminsn.pc_inc = 2;
+
+       if (!mm_insn_16bit(halfword)) {
+               __get_user(halfword, pc16);
+               pc16++;
+               contpc = regs->cp0_epc + 4;
+               mminsn.pc_inc = 4;
+               word |= halfword;
+       }
+       mminsn.insn = word;
+
+       if (get_user(halfword, pc16))
+               goto sigsegv;
+       mminsn.next_pc_inc = 2;
+       word = ((unsigned int)halfword << 16);
+
+       if (!mm_insn_16bit(halfword)) {
+               pc16++;
+               if (get_user(halfword, pc16))
+                       goto sigsegv;
+               mminsn.next_pc_inc = 4;
+               word |= halfword;
+       }
+       mminsn.next_insn = word;
+
+       mm_isBranchInstr(regs, mminsn, &contpc);
+
+       regs->cp0_epc = contpc;
+
+       return 0;
+
+sigsegv:
+       force_sig(SIGSEGV, current);
+       return -EFAULT;
+}
+
+/*
+ * Compute return address and emulate branch in MIPS16e mode after an
+ * exception only. It does not handle compact branches/jumps and cannot
+ * be used in interrupt context. (Compact branches/jumps do not cause
+ * exceptions.)
+ */
+int __MIPS16e_compute_return_epc(struct pt_regs *regs)
+{
+       u16 __user *addr;
+       union mips16e_instruction inst;
+       u16 inst2;
+       u32 fullinst;
+       long epc;
+
+       epc = regs->cp0_epc;
+
+       /* Read the instruction. */
+       addr = (u16 __user *)msk_isa16_mode(epc);
+       if (__get_user(inst.full, addr)) {
+               force_sig(SIGSEGV, current);
+               return -EFAULT;
+       }
+
+       switch (inst.ri.opcode) {
+       case MIPS16e_extend_op:
+               regs->cp0_epc += 4;
+               return 0;
+
+               /*
+                *  JAL and JALX in MIPS16e mode
+                */
+       case MIPS16e_jal_op:
+               addr += 1;
+               if (__get_user(inst2, addr)) {
+                       force_sig(SIGSEGV, current);
+                       return -EFAULT;
+               }
+               fullinst = ((unsigned)inst.full << 16) | inst2;
+               regs->regs[31] = epc + 6;
+               epc += 4;
+               epc >>= 28;
+               epc <<= 28;
+               /*
+                * JAL:5 X:1 TARGET[20:16]:5 TARGET[25:21]:5 TARGET[15:0]:16
+                *
+                * ......TARGET[15:0].................TARGET[20:16]...........
+                * ......TARGET[25:21]
+                */
+               epc |=
+                   ((fullinst & 0xffff) << 2) | ((fullinst & 0x3e00000) >> 3) |
+                   ((fullinst & 0x1f0000) << 7);
+               if (!inst.jal.x)
+                       set_isa16_mode(epc);    /* Set ISA mode bit. */
+               regs->cp0_epc = epc;
+               return 0;
+
+               /*
+                *  J(AL)R(C)
+                */
+       case MIPS16e_rr_op:
+               if (inst.rr.func == MIPS16e_jr_func) {
+
+                       if (inst.rr.ra)
+                               regs->cp0_epc = regs->regs[31];
+                       else
+                               regs->cp0_epc =
+                                   regs->regs[reg16to32[inst.rr.rx]];
+
+                       if (inst.rr.l) {
+                               if (inst.rr.nd)
+                                       regs->regs[31] = epc + 2;
+                               else
+                                       regs->regs[31] = epc + 4;
+                       }
+                       return 0;
+               }
+               break;
+       }
+
+       /*
+        * All other cases have no branch delay slot and are 16-bits.
+        * Branches do not cause an exception.
+        */
+       regs->cp0_epc += 2;
+
+       return 0;
+}
+
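Editor's note: the mask-and-shift sequence above scatters the split MIPS16e JAL/JALX target fields back into a contiguous byte offset. The same reassembly as a stand-alone helper, a sketch that simply mirrors the masks used above:

        /* Sketch only: byte offset within the 256MB region from a 32-bit
         * MIPS16e JAL(X) word.  Field placement in fullinst:
         *   target[15:0]  -> bits 15..0
         *   target[20:16] -> bits 25..21
         *   target[25:21] -> bits 20..16
         */
        static unsigned long mips16e_jal_offset(unsigned int fullinst)
        {
                return ((fullinst & 0x0000ffff) << 2) |
                       ((fullinst & 0x03e00000) >> 3) |
                       ((fullinst & 0x001f0000) << 7);
        }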
 /**
  * __compute_return_epc_for_insn - Computes the return address and do emulate
  *                                 branch simulation, if required.
@@ -129,6 +305,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
                epc <<= 28;
                epc |= (insn.j_format.target << 2);
                regs->cp0_epc = epc;
+               if (insn.i_format.opcode == jalx_op)
+                       set_isa16_mode(regs->cp0_epc);
                break;
 
        /*
diff --git a/arch/mips/kernel/cevt-gic.c b/arch/mips/kernel/cevt-gic.c
new file mode 100644 (file)
index 0000000..730eaf9
--- /dev/null
@@ -0,0 +1,104 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2013  Imagination Technologies Ltd.
+ */
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+#include <linux/irq.h>
+
+#include <asm/time.h>
+#include <asm/gic.h>
+#include <asm/mips-boards/maltaint.h>
+
+DEFINE_PER_CPU(struct clock_event_device, gic_clockevent_device);
+int gic_timer_irq_installed;
+
+static int gic_next_event(unsigned long delta, struct clock_event_device *evt)
+{
+       u64 cnt;
+       int res;
+
+       cnt = gic_read_count();
+       cnt += (u64)delta;
+       gic_write_compare(cnt);
+       res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0;
+       return res;
+}
+
+void gic_set_clock_mode(enum clock_event_mode mode,
+                               struct clock_event_device *evt)
+{
+       /* Nothing to do ...  */
+}
+
+irqreturn_t gic_compare_interrupt(int irq, void *dev_id)
+{
+       struct clock_event_device *cd;
+       int cpu = smp_processor_id();
+
+       gic_write_compare(gic_read_compare());
+       cd = &per_cpu(gic_clockevent_device, cpu);
+       cd->event_handler(cd);
+       return IRQ_HANDLED;
+}
+
+struct irqaction gic_compare_irqaction = {
+       .handler = gic_compare_interrupt,
+       .flags = IRQF_PERCPU | IRQF_TIMER,
+       .name = "timer",
+};
+
+void gic_event_handler(struct clock_event_device *dev)
+{
+}
+
+int __cpuinit gic_clockevent_init(void)
+{
+       unsigned int cpu = smp_processor_id();
+       struct clock_event_device *cd;
+       unsigned int irq;
+
+       if (!cpu_has_counter || !gic_frequency)
+               return -ENXIO;
+
+       irq = MIPS_GIC_IRQ_BASE;
+
+       cd = &per_cpu(gic_clockevent_device, cpu);
+
+       cd->name                = "MIPS GIC";
+       cd->features            = CLOCK_EVT_FEAT_ONESHOT;
+
+       clockevent_set_clock(cd, gic_frequency);
+
+       /* Calculate the min / max delta */
+       cd->max_delta_ns        = clockevent_delta2ns(0x7fffffff, cd);
+       cd->min_delta_ns        = clockevent_delta2ns(0x300, cd);
+
+       cd->rating              = 300;
+       cd->irq                 = irq;
+       cd->cpumask             = cpumask_of(cpu);
+       cd->set_next_event      = gic_next_event;
+       cd->set_mode            = gic_set_clock_mode;
+       cd->event_handler       = gic_event_handler;
+
+       clockevents_register_device(cd);
+
+       GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_MAP), 0x80000002);
+       GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), GIC_VPE_SMASK_CMP_MSK);
+
+       if (gic_timer_irq_installed)
+               return 0;
+
+       gic_timer_irq_installed = 1;
+
+       setup_irq(irq, &gic_compare_irqaction);
+       irq_set_handler(irq, handle_percpu_irq);
+       return 0;
+}
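Editor's note: one detail worth spelling out from gic_next_event() above is the final comparison, done on a signed difference so it stays correct across counter wrap-around; a non-zero result (-ETIME) tells the clockevents core to retry with a larger delta. A minimal sketch of the idiom with 32-bit values:

        /* Sketch only: has 'target' already passed, given a free-running
         * counter that may wrap?  The signed cast makes the test wrap-safe. */
        static int example_already_expired(unsigned int now, unsigned int target)
        {
                return (int)(now - target) >= 0;
        }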
index 07b847d77f5da029dca6a6cbb53198807ff781a8..02033eaf8825420eea30105e92edcc795d8b5b70 100644 (file)
@@ -23,7 +23,6 @@
  */
 
 #ifndef CONFIG_MIPS_MT_SMTC
-
 static int mips_next_event(unsigned long delta,
                           struct clock_event_device *evt)
 {
@@ -49,7 +48,6 @@ DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
 int cp0_timer_irq_installed;
 
 #ifndef CONFIG_MIPS_MT_SMTC
-
 irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
 {
        const int r2 = cpu_has_mips_r2;
@@ -74,6 +72,9 @@ irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
                /* Clear Count/Compare Interrupt */
                write_c0_compare(read_c0_compare());
                cd = &per_cpu(mips_clockevent_device, cpu);
+#ifdef CONFIG_CEVT_GIC
+               if (!gic_present)
+#endif
                cd->event_handler(cd);
        }
 
@@ -118,6 +119,10 @@ int c0_compare_int_usable(void)
        unsigned int delta;
        unsigned int cnt;
 
+#ifdef CONFIG_KVM_GUEST
+       return 1;
+#endif
+
        /*
         * IP7 already pending?  Try to clear it by acking the timer.
         */
@@ -166,7 +171,6 @@ int c0_compare_int_usable(void)
 }
 
 #ifndef CONFIG_MIPS_MT_SMTC
-
 int __cpuinit r4k_clockevent_init(void)
 {
        unsigned int cpu = smp_processor_id();
@@ -206,6 +210,9 @@ int __cpuinit r4k_clockevent_init(void)
        cd->set_mode            = mips_set_clock_mode;
        cd->event_handler       = mips_event_handler;
 
+#ifdef CONFIG_CEVT_GIC
+       if (!gic_present)
+#endif
        clockevents_register_device(cd);
 
        if (cp0_timer_irq_installed)
index 5fe66a0c32245366bc56079eca0a160a7b9bc0cb..4bbffdb9024ffb9cf437adb4fd5243e75e68a88c 100644 (file)
@@ -470,6 +470,9 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
                c->options |= MIPS_CPU_ULRI;
        if (config3 & MIPS_CONF3_ISA)
                c->options |= MIPS_CPU_MICROMIPS;
+#ifdef CONFIG_CPU_MICROMIPS
+       write_c0_config3(read_c0_config3() | MIPS_CONF3_ISA_OE);
+#endif
        if (config3 & MIPS_CONF3_VZ)
                c->ases |= MIPS_ASE_VZ;
 
index 5dca24bce51bef67320417767e7df8419a983dd7..e026209011178342a6c98bbd709366fb8f3d06d8 100644 (file)
@@ -5,23 +5,14 @@
  *
  * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
  */
-#include <linux/clocksource.h>
 #include <linux/init.h>
+#include <linux/time.h>
 
-#include <asm/time.h>
 #include <asm/gic.h>
 
 static cycle_t gic_hpt_read(struct clocksource *cs)
 {
-       unsigned int hi, hi2, lo;
-
-       do {
-               GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi);
-               GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), lo);
-               GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi2);
-       } while (hi2 != hi);
-
-       return (((cycle_t) hi) << 32) + lo;
+       return gic_read_count();
 }
 
 static struct clocksource gic_clocksource = {
index ecb347ce1b3d03edc9cb15c3d5dcaf9bb434da84..5c2ba9f08a80d33ed0cdf61ffaf429524c3ffd4f 100644 (file)
@@ -5,8 +5,8 @@
  *
  * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
- * Copyright (C) 2001 MIPS Technologies, Inc.
  * Copyright (C) 2002, 2007  Maciej W. Rozycki
+ * Copyright (C) 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
  */
 #include <linux/init.h>
 
 #include <asm/war.h>
 #include <asm/thread_info.h>
 
+#ifdef CONFIG_MIPS_MT_SMTC
 #define PANIC_PIC(msg)                                 \
-               .set push;                              \
+               .set    push;                           \
+               .set    nomicromips;                    \
                .set    reorder;                        \
                PTR_LA  a0,8f;                          \
                .set    noat;                           \
 9:             b       9b;                             \
                .set    pop;                            \
                TEXT(msg)
+#endif
 
        __INIT
 
-NESTED(except_vec0_generic, 0, sp)
-       PANIC_PIC("Exception vector 0 called")
-       END(except_vec0_generic)
-
-NESTED(except_vec1_generic, 0, sp)
-       PANIC_PIC("Exception vector 1 called")
-       END(except_vec1_generic)
-
 /*
  * General exception vector for all other CPUs.
  *
@@ -138,12 +133,19 @@ LEAF(r4k_wait)
         nop
        nop
        nop
+#ifdef CONFIG_CPU_MICROMIPS
+       nop
+       nop
+       nop
+       nop
+#endif
        .set    mips3
        wait
        /* end of rollback region (the region size must be power of two) */
-       .set    pop
 1:
        jr      ra
+       nop
+       .set    pop
        END(r4k_wait)
 
        .macro  BUILD_ROLLBACK_PROLOGUE handler
@@ -201,7 +203,11 @@ NESTED(handle_int, PT_SIZE, sp)
        LONG_L  s0, TI_REGS($28)
        LONG_S  sp, TI_REGS($28)
        PTR_LA  ra, ret_from_irq
-       j       plat_irq_dispatch
+       PTR_LA  v0, plat_irq_dispatch
+       jr      v0
+#ifdef CONFIG_CPU_MICROMIPS
+       nop
+#endif
        END(handle_int)
 
        __INIT
@@ -222,11 +228,14 @@ NESTED(except_vec4, 0, sp)
 /*
  * EJTAG debug exception handler.
  * The EJTAG debug exception entry point is 0xbfc00480, which
- * normally is in the boot PROM, so the boot PROM must do a
+ * normally is in the boot PROM, so the boot PROM must do an
  * unconditional jump to this vector.
  */
 NESTED(except_vec_ejtag_debug, 0, sp)
        j       ejtag_debug_handler
+#ifdef CONFIG_CPU_MICROMIPS
+        nop
+#endif
        END(except_vec_ejtag_debug)
 
        __FINIT
@@ -251,9 +260,10 @@ NESTED(except_vec_vi, 0, sp)
 FEXPORT(except_vec_vi_mori)
        ori     a0, $0, 0
 #endif /* CONFIG_MIPS_MT_SMTC */
+       PTR_LA  v1, except_vec_vi_handler
 FEXPORT(except_vec_vi_lui)
        lui     v0, 0           /* Patched */
-       j       except_vec_vi_handler
+       jr      v1
 FEXPORT(except_vec_vi_ori)
         ori    v0, 0           /* Patched */
        .set    pop
@@ -354,6 +364,9 @@ EXPORT(ejtag_debug_buffer)
  */
 NESTED(except_vec_nmi, 0, sp)
        j       nmi_handler
+#ifdef CONFIG_CPU_MICROMIPS
+        nop
+#endif
        END(except_vec_nmi)
 
        __FINIT
@@ -480,7 +493,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
        .set    noreorder
        /* check if TLB contains a entry for EPC */
        MFC0    k1, CP0_ENTRYHI
-       andi    k1, 0xff        /* ASID_MASK */
+       andi    k1, 0xff        /* ASID_MASK patched at run-time!! */
        MFC0    k0, CP0_EPC
        PTR_SRL k0, _PAGE_SHIFT + 1
        PTR_SLL k0, _PAGE_SHIFT + 1
@@ -500,13 +513,35 @@ NESTED(nmi_handler, PT_SIZE, sp)
        .set    push
        .set    noat
        .set    noreorder
-       /* 0x7c03e83b: rdhwr v1,$29 */
+       /* MIPS32:    0x7c03e83b: rdhwr v1,$29 */
+       /* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
        MFC0    k1, CP0_EPC
-       lui     k0, 0x7c03
-       lw      k1, (k1)
-       ori     k0, 0xe83b
-       .set    reorder
+#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
+       and     k0, k1, 1
+       beqz    k0, 1f
+       xor     k1, k0
+       lhu     k0, (k1)
+       lhu     k1, 2(k1)
+       ins     k1, k0, 16, 16
+       lui     k0, 0x007d
+       b       docheck
+       ori     k0, 0x6b3c
+1:
+       lui     k0, 0x7c03
+       lw      k1, (k1)
+       ori     k0, 0xe83b
+#else
+       andi    k0, k1, 1
+       bnez    k0, handle_ri
+       lui     k0, 0x7c03
+       lw      k1, (k1)
+       ori     k0, 0xe83b
+#endif
+       .set    reorder
+docheck:
        bne     k0, k1, handle_ri       /* if not ours */
+
+isrdhwr:
        /* The insn is rdhwr.  No need to check CAUSE.BD here. */
        get_saved_sp    /* k1 := current_thread_info */
        .set    noreorder
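Editor's note: in C terms, the fast path added above does roughly the following before falling back to handle_ri (illustrative only; the real code runs inside the exception handler and no access checking is shown here):

        /* Sketch only: does EPC point at "rdhwr v1, $29" in either encoding? */
        static int example_is_rdhwr_ulr(unsigned long epc)
        {
                if (epc & 1) {                          /* ISA bit set: microMIPS */
                        unsigned long base = epc & ~1UL;
                        unsigned int insn = (*(unsigned short *)base << 16) |
                                             *(unsigned short *)(base + 2);
                        return insn == 0x007d6b3c;
                }
                return *(unsigned int *)epc == 0x7c03e83b;      /* classic MIPS32 */
        }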
index 485e6a961b317ea0f6778b04e57c8c08d3ad8c02..c01b307317a9635b88394d18164710262e206234 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/init.h>
 #include <linux/smp.h>
 #include <linux/irq.h>
+#include <linux/clocksource.h>
 
 #include <asm/io.h>
 #include <asm/gic.h>
@@ -19,6 +20,8 @@
 #include <linux/hardirq.h>
 #include <asm-generic/bitops/find.h>
 
+unsigned int gic_frequency;
+unsigned int gic_present;
 unsigned long _gic_base;
 unsigned int gic_irq_base;
 unsigned int gic_irq_flags[GIC_NUM_INTRS];
@@ -30,6 +33,39 @@ static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
 static struct gic_pending_regs pending_regs[NR_CPUS];
 static struct gic_intrmask_regs intrmask_regs[NR_CPUS];
 
+#if defined(CONFIG_CSRC_GIC) || defined(CONFIG_CEVT_GIC)
+cycle_t gic_read_count(void)
+{
+       unsigned int hi, hi2, lo;
+
+       do {
+               GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi);
+               GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), lo);
+               GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi2);
+       } while (hi2 != hi);
+
+       return (((cycle_t) hi) << 32) + lo;
+}
+
+void gic_write_compare(cycle_t cnt)
+{
+       GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
+                               (int)(cnt >> 32));
+       GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
+                               (int)(cnt & 0xffffffff));
+}
+
+cycle_t gic_read_compare(void)
+{
+       unsigned int hi, lo;
+
+       GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI), hi);
+       GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO), lo);
+
+       return (((cycle_t) hi) << 32) + lo;
+}
+#endif
+
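Editor's note: gic_read_count() above is the standard tear-free read of a 64-bit counter exposed as two 32-bit registers; re-reading the high word detects a carry between the two accesses. A generic sketch of the same idea, where read_hi32()/read_lo32() are placeholder accessors rather than real kernel functions:

        static unsigned long long example_read_counter64(void)
        {
                unsigned int hi, hi2, lo;

                do {
                        hi  = read_hi32();      /* placeholder: high word */
                        lo  = read_lo32();      /* placeholder: low word  */
                        hi2 = read_hi32();      /* detect rollover        */
                } while (hi2 != hi);            /* retry if low word wrapped */

                return ((unsigned long long)hi << 32) | lo;
        }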
 unsigned int gic_get_timer_pending(void)
 {
        unsigned int vpe_pending;
@@ -116,6 +152,17 @@ static void __init vpe_local_setup(unsigned int numvpes)
        }
 }
 
+unsigned int gic_compare_int(void)
+{
+       unsigned int pending;
+
+       GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_PEND), pending);
+       if (pending & GIC_VPE_PEND_CMP_MSK)
+               return 1;
+       else
+               return 0;
+}
+
 unsigned int gic_get_int(void)
 {
        unsigned int i;
index d1d576b765f5ecdd86e19c63ade4634d326687fe..0b29646bcee770533e4a0d25cf1a50b4cc167b7b 100644 (file)
@@ -165,10 +165,3 @@ asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_a2,
        return sys_fallocate(fd, mode, merge_64(offset_a2, offset_a3),
                             merge_64(len_a4, len_a5));
 }
-
-SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags,
-               u64, a3, u64, a4, int, dfd, const char  __user *, pathname)
-{
-       return sys_fanotify_mark(fanotify_fd, flags, merge_64(a3, a4),
-                                dfd, pathname);
-}
index 411a058d2c536c7709edfcb9476b4fa185034942..876097529697250254b9c3fd58492348703381e3 100644 (file)
@@ -11,9 +11,9 @@
 #include <linux/slab.h>
 
 #include <asm/mips_machine.h>
+#include <asm/prom.h>
 
 static struct mips_machine *mips_machine __initdata;
-static char *mips_machine_name = "Unknown";
 
 #define for_each_machine(mach) \
        for ((mach) = (struct mips_machine *)&__mips_machines_start; \
@@ -21,25 +21,6 @@ static char *mips_machine_name = "Unknown";
             (unsigned long)(mach) < (unsigned long)&__mips_machines_end; \
             (mach)++)
 
-__init void mips_set_machine_name(const char *name)
-{
-       char *p;
-
-       if (name == NULL)
-               return;
-
-       p = kstrdup(name, GFP_KERNEL);
-       if (!p)
-               pr_err("MIPS: no memory for machine_name\n");
-
-       mips_machine_name = p;
-}
-
-char *mips_get_machine_name(void)
-{
-       return mips_machine_name;
-}
-
 __init int mips_machtype_setup(char *id)
 {
        struct mips_machine *mach;
@@ -79,7 +60,6 @@ __init void mips_machine_setup(void)
                return;
 
        mips_set_machine_name(mips_machine->mach_name);
-       pr_info("MIPS: machine is %s\n", mips_machine_name);
 
        if (mips_machine->mach_setup)
                mips_machine->mach_setup();
index 7a54f74b7818ad402a70221eb2090dc91f5643f8..a3e461408b7e830758d3b3dfeed661573277131e 100644 (file)
@@ -12,7 +12,7 @@
 #include <asm/cpu-features.h>
 #include <asm/mipsregs.h>
 #include <asm/processor.h>
-#include <asm/mips_machine.h>
+#include <asm/prom.h>
 
 unsigned int vced_count, vcei_count;
 
@@ -99,6 +99,10 @@ static int show_cpuinfo(struct seq_file *m, void *v)
        if (cpu_has_vz)         seq_printf(m, "%s", " vz");
        seq_printf(m, "\n");
 
+       if (cpu_has_mmips) {
+               seq_printf(m, "micromips kernel\t: %s\n",
+                     (read_c0_config3() & MIPS_CONF3_ISA_OE) ?  "yes" : "no");
+       }
        seq_printf(m, "shadow register sets\t: %d\n",
                      cpu_data[n].srsets);
        seq_printf(m, "kscratch registers\t: %d\n",
index cfc742d75b7f3a74f1ace85269d55e0e0b85a48d..eb902c1f0cad4c50031836ad73225f620be8d7d5 100644 (file)
@@ -7,6 +7,7 @@
  * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  * Copyright (C) 2004 Thiemo Seufer
+ * Copyright (C) 2013  Imagination Technologies Ltd.
  */
 #include <linux/errno.h>
 #include <linux/sched.h>
@@ -225,34 +226,115 @@ struct mips_frame_info {
 
 static inline int is_ra_save_ins(union mips_instruction *ip)
 {
+#ifdef CONFIG_CPU_MICROMIPS
+       union mips_instruction mmi;
+
+       /*
+        * swsp ra,offset
+        * swm16 reglist,offset(sp)
+        * swm32 reglist,offset(sp)
+        * sw32 ra,offset(sp)
+        * jraddiusp - NOT SUPPORTED
+        *
+        * microMIPS is way more fun...
+        */
+       if (mm_insn_16bit(ip->halfword[0])) {
+               mmi.word = (ip->halfword[0] << 16);
+               return ((mmi.mm16_r5_format.opcode == mm_swsp16_op &&
+                        mmi.mm16_r5_format.rt == 31) ||
+                       (mmi.mm16_m_format.opcode == mm_pool16c_op &&
+                        mmi.mm16_m_format.func == mm_swm16_op));
+       }
+       else {
+               mmi.halfword[0] = ip->halfword[1];
+               mmi.halfword[1] = ip->halfword[0];
+               return ((mmi.mm_m_format.opcode == mm_pool32b_op &&
+                        mmi.mm_m_format.rd > 9 &&
+                        mmi.mm_m_format.base == 29 &&
+                        mmi.mm_m_format.func == mm_swm32_func) ||
+                       (mmi.i_format.opcode == mm_sw32_op &&
+                        mmi.i_format.rs == 29 &&
+                        mmi.i_format.rt == 31));
+       }
+#else
        /* sw / sd $ra, offset($sp) */
        return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
                ip->i_format.rs == 29 &&
                ip->i_format.rt == 31;
+#endif
 }
 
 static inline int is_jal_jalr_jr_ins(union mips_instruction *ip)
 {
+#ifdef CONFIG_CPU_MICROMIPS
+       /*
+        * jr16,jrc,jalr16,jalrs16
+        * jal
+        * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
+        * jraddiusp - NOT SUPPORTED
+        *
+        * microMIPS is kind of more fun...
+        */
+       union mips_instruction mmi;
+
+       mmi.word = (ip->halfword[0] << 16);
+
+       if ((mmi.mm16_r5_format.opcode == mm_pool16c_op &&
+           (mmi.mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op) ||
+           ip->j_format.opcode == mm_jal32_op)
+               return 1;
+       if (ip->r_format.opcode != mm_pool32a_op ||
+                       ip->r_format.func != mm_pool32axf_op)
+               return 0;
+       return (((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op);
+#else
        if (ip->j_format.opcode == jal_op)
                return 1;
        if (ip->r_format.opcode != spec_op)
                return 0;
        return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
+#endif
 }
 
 static inline int is_sp_move_ins(union mips_instruction *ip)
 {
+#ifdef CONFIG_CPU_MICROMIPS
+       /*
+        * addiusp -imm
+        * addius5 sp,-imm
+        * addiu32 sp,sp,-imm
+        * jraddiusp - NOT SUPPORTED
+        *
+        * microMIPS is not more fun...
+        */
+       if (mm_insn_16bit(ip->halfword[0])) {
+               union mips_instruction mmi;
+
+               mmi.word = (ip->halfword[0] << 16);
+               return ((mmi.mm16_r3_format.opcode == mm_pool16d_op &&
+                        mmi.mm16_r3_format.simmediate & mm_addiusp_func) ||
+                       (mmi.mm16_r5_format.opcode == mm_pool16d_op &&
+                        mmi.mm16_r5_format.rt == 29));
+       }
+       return (ip->mm_i_format.opcode == mm_addiu32_op &&
+                ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29);
+#else
        /* addiu/daddiu sp,sp,-imm */
        if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
                return 0;
        if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
                return 1;
+#endif
        return 0;
 }
 
 static int get_frame_info(struct mips_frame_info *info)
 {
+#ifdef CONFIG_CPU_MICROMIPS
+       union mips_instruction *ip = (void *) (((char *) info->func) - 1);
+#else
        union mips_instruction *ip = info->func;
+#endif
        unsigned max_insns = info->func_size / sizeof(union mips_instruction);
        unsigned i;
 
@@ -272,7 +354,26 @@ static int get_frame_info(struct mips_frame_info *info)
                        break;
                if (!info->frame_size) {
                        if (is_sp_move_ins(ip))
+                       {
+#ifdef CONFIG_CPU_MICROMIPS
+                               if (mm_insn_16bit(ip->halfword[0]))
+                               {
+                                       unsigned short tmp;
+
+                                       if (ip->halfword[0] & mm_addiusp_func)
+                                       {
+                                               tmp = (((ip->halfword[0] >> 1) & 0x1ff) << 2);
+                                               info->frame_size = -(signed short)(tmp | ((tmp & 0x100) ? 0xfe00 : 0));
+                                       } else {
+                                               tmp = (ip->halfword[0] >> 1);
+                                               info->frame_size = -(signed short)(tmp & 0xf);
+                                       }
+                                       ip = (void *) &ip->halfword[1];
+                                       ip--;
+                               } else
+#endif
                                info->frame_size = - ip->i_format.simmediate;
+                       }
                        continue;
                }
                if (info->pc_offset == -1 && is_ra_save_ins(ip)) {
index 028f6f837ef9c975697cd763fa685126a5be6b48..5712bb5322454f1a98d169e58490e77b62e149be 100644 (file)
 #include <asm/page.h>
 #include <asm/prom.h>
 
+static char mips_machine_name[64] = "Unknown";
+
+__init void mips_set_machine_name(const char *name)
+{
+       if (name == NULL)
+               return;
+
+       strncpy(mips_machine_name, name, sizeof(mips_machine_name));
+       pr_info("MIPS: machine is %s\n", mips_get_machine_name());
+}
+
+char *mips_get_machine_name(void)
+{
+       return mips_machine_name;
+}
+
+#ifdef CONFIG_OF
 int __init early_init_dt_scan_memory_arch(unsigned long node,
                                          const char *uname, int depth,
                                          void *data)
@@ -50,6 +67,18 @@ void __init early_init_dt_setup_initrd_arch(unsigned long start,
 }
 #endif
 
+int __init early_init_dt_scan_model(unsigned long node, const char *uname,
+                                   int depth, void *data)
+{
+       if (!depth) {
+               char *model = of_get_flat_dt_prop(node, "model", NULL);
+
+               if (model)
+                       mips_set_machine_name(model);
+       }
+       return 0;
+}
+
 void __init early_init_devtree(void *params)
 {
        /* Setup flat device-tree pointer */
@@ -65,6 +94,9 @@ void __init early_init_devtree(void *params)
        /* Scan memory nodes */
        of_scan_flat_dt(early_init_dt_scan_root, NULL);
        of_scan_flat_dt(early_init_dt_scan_memory_arch, NULL);
+
+       /* try to load the mips machine name */
+       of_scan_flat_dt(early_init_dt_scan_model, NULL);
 }
 
 void __init __dt_setup_arch(struct boot_param_header *bph)
@@ -79,3 +111,4 @@ void __init __dt_setup_arch(struct boot_param_header *bph)
 
        early_init_devtree(initial_boot_params);
 }
+#endif
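Editor's side note on mips_set_machine_name() above: strncpy() does not NUL-terminate when the source is as long as the buffer, so a defensive variant would cap the copy and pin the last byte. A sketch only, same intent as the patch:

        static void example_set_machine_name(const char *name)
        {
                if (!name)
                        return;
                strncpy(mips_machine_name, name, sizeof(mips_machine_name) - 1);
                mips_machine_name[sizeof(mips_machine_name) - 1] = '\0';
        }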
index 9ea29649fc28b6c119e965e750fe77efeaef77ec..9b36424b03c5f41aa48312c770f90e69a43f6fae 100644 (file)
@@ -138,9 +138,18 @@ stackargs:
 5:     jr      t1
         sw     t5, 16(sp)              # argument #5 to ksp
 
+#ifdef CONFIG_CPU_MICROMIPS
        sw      t8, 28(sp)              # argument #8 to ksp
+       nop
        sw      t7, 24(sp)              # argument #7 to ksp
+       nop
        sw      t6, 20(sp)              # argument #6 to ksp
+       nop
+#else
+       sw      t8, 28(sp)              # argument #8 to ksp
+       sw      t7, 24(sp)              # argument #7 to ksp
+       sw      t6, 20(sp)              # argument #6 to ksp
+#endif
 6:     j       stack_done              # go back
         nop
        .set    pop
index 103bfe570fe8ee375d94732b074efee7723ec550..74f485d3c0ef41bb7f73204f06a7a801d0465f13 100644 (file)
@@ -529,7 +529,7 @@ sys_call_table:
        PTR     sys_accept4
        PTR     compat_sys_recvmmsg             /* 4335 */
        PTR     sys_fanotify_init
-       PTR     sys_32_fanotify_mark
+       PTR     compat_sys_fanotify_mark
        PTR     sys_prlimit64
        PTR     sys_name_to_handle_at
        PTR     compat_sys_open_by_handle_at    /* 4340 */
index 4c774d5d50874ab515e023809ffd48fcb611c053..c7f90519e58ce0ade98dd826752fbaa1d2287da0 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/pfn.h>
 #include <linux/debugfs.h>
 #include <linux/kexec.h>
+#include <linux/sizes.h>
 
 #include <asm/addrspace.h>
 #include <asm/bootinfo.h>
@@ -77,6 +78,8 @@ EXPORT_SYMBOL(mips_io_port_base);
 static struct resource code_resource = { .name = "Kernel code", };
 static struct resource data_resource = { .name = "Kernel data", };
 
+static void *detect_magic __initdata = detect_memory_region;
+
 void __init add_memory_region(phys_t start, phys_t size, long type)
 {
        int x = boot_mem_map.nr_map;
@@ -122,6 +125,25 @@ void __init add_memory_region(phys_t start, phys_t size, long type)
        boot_mem_map.nr_map++;
 }
 
+void __init detect_memory_region(phys_t start, phys_t sz_min, phys_t sz_max)
+{
+       void *dm = &detect_magic;
+       phys_t size;
+
+       for (size = sz_min; size < sz_max; size <<= 1) {
+               if (!memcmp(dm, dm + size, sizeof(detect_magic)))
+                       break;
+       }
+
+       pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
+               ((unsigned long long) size) / SZ_1M,
+               (unsigned long long) start,
+               ((unsigned long long) sz_min) / SZ_1M,
+               ((unsigned long long) sz_max) / SZ_1M);
+
+       add_memory_region(start, size, BOOT_MEM_RAM);
+}
+
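Editor's note: detect_memory_region() above works by address aliasing. It writes nothing; it keeps doubling the probe size until the magic word at the base is seen again at base + size, i.e. the address space has wrapped around the installed RAM. A hypothetical platform hook using it (SZ_* constants come from linux/sizes.h, included above):

        /* Hypothetical usage: probe for 8MB..128MB of RAM at physical 0. */
        void __init example_plat_mem_setup(void)
        {
                detect_memory_region(0, SZ_8M, SZ_128M);
        }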
 static void __init print_memory_map(void)
 {
        int i;
index b5e88fd832775009a774b727bfcb489667e3cca8..fd3ef2c2afbc37732d9bedcb9fff59d1dda935ea 100644 (file)
@@ -35,6 +35,7 @@
 #include <asm/war.h>
 #include <asm/vdso.h>
 #include <asm/dsp.h>
+#include <asm/inst.h>
 
 #include "signal-common.h"
 
@@ -480,7 +481,15 @@ static void handle_signal(unsigned long sig, siginfo_t *info,
        sigset_t *oldset = sigmask_to_save();
        int ret;
        struct mips_abi *abi = current->thread.abi;
+#ifdef CONFIG_CPU_MICROMIPS
+       void *vdso;
+       unsigned int tmp = (unsigned int)current->mm->context.vdso;
+
+       set_isa16_mode(tmp);
+       vdso = (void *)tmp;
+#else
        void *vdso = current->mm->context.vdso;
+#endif
 
        if (regs->regs[0]) {
                switch(regs->regs[2]) {
index bfede063d96a52e3dd8f12aedf67f15468d7c3aa..3e5164c11cacabe66ea021cb29c979b93d920e00 100644 (file)
@@ -34,6 +34,7 @@
 #include <asm/mipsregs.h>
 #include <asm/mipsmtregs.h>
 #include <asm/mips_mt.h>
+#include <asm/gic.h>
 
 static void __init smvp_copy_vpe_config(void)
 {
@@ -151,8 +152,6 @@ static void vsmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
 static void __cpuinit vsmp_init_secondary(void)
 {
 #ifdef CONFIG_IRQ_GIC
-       extern int gic_present;
-
        /* This is Malta specific: IPI, performance and timer interrupts */
        if (gic_present)
                change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
index aee04af213c5895d5b273b4f17fe896621707705..c17619fe18e32a9f23a5df7e18b0b943b5d0cf2c 100644 (file)
@@ -83,6 +83,7 @@ static inline void set_cpu_sibling_map(int cpu)
 }
 
 struct plat_smp_ops *mp_ops;
+EXPORT_SYMBOL(mp_ops);
 
 __cpuinit void register_smp_ops(struct plat_smp_ops *ops)
 {
index 76016ac0a9c8818960e61f055ac47e4d55a9bdf2..2866863a39df269da6ba9710acf96808197e33ea 100644 (file)
@@ -49,6 +49,9 @@ CAN WE PROVE THAT WE WON'T DO THIS IF INTS DISABLED??
        .text
        .align 5
 FEXPORT(__smtc_ipi_vector)
+#ifdef CONFIG_CPU_MICROMIPS
+       nop
+#endif
        .set    noat
        /* Disable thread scheduling to make Status update atomic */
        DMT     27                                      # dmt   k1
index 7186222dc5bb285a4ff74a23a5851dd03b9356c1..31d22f3121c98bb8c0b57488c60c58d4c0ca5b4c 100644 (file)
@@ -111,7 +111,7 @@ static int vpe0limit;
 static int ipibuffers;
 static int nostlb;
 static int asidmask;
-unsigned long smtc_asid_mask = 0xff;
+unsigned int smtc_asid_mask = 0xff;
 
 static int __init vpe0tcs(char *str)
 {
@@ -1395,7 +1395,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
        asid = asid_cache(cpu);
 
        do {
-               if (!((asid += ASID_INC) & ASID_MASK) ) {
+               if (!ASID_MASK(ASID_INC(asid))) {
                        if (cpu_has_vtag_icache)
                                flush_icache_all();
                        /* Traverse all online CPUs (hack requires contiguous range) */
@@ -1414,7 +1414,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
                                                mips_ihb();
                                        }
                                        tcstat = read_tc_c0_tcstatus();
-                                       smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
+                                       smtc_live_asid[tlb][ASID_MASK(tcstat)] |= (asiduse)(0x1 << i);
                                        if (!prevhalt)
                                                write_tc_c0_tchalt(0);
                                }
@@ -1423,7 +1423,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
                                asid = ASID_FIRST_VERSION;
                        local_flush_tlb_all();  /* start new asid cycle */
                }
-       } while (smtc_live_asid[tlb][(asid & ASID_MASK)]);
+       } while (smtc_live_asid[tlb][ASID_MASK(asid)]);
 
        /*
         * SMTC shares the TLB within VPEs and possibly across all VPEs.
@@ -1461,7 +1461,7 @@ void smtc_flush_tlb_asid(unsigned long asid)
                tlb_read();
                ehb();
                ehi = read_c0_entryhi();
-               if ((ehi & ASID_MASK) == asid) {
+               if (ASID_MASK(ehi) == asid) {
                    /*
                     * Invalidate only entries with specified ASID,
                     * making sure all entries differ.
index 25225515451f8f348d4f2ffa91a4c64c9b004a82..77cff1f6d050cb92e21475ae52a9ef2f037b5bc5 100644 (file)
@@ -8,8 +8,8 @@
  * Copyright (C) 1998 Ulf Carlsson
  * Copyright (C) 1999 Silicon Graphics, Inc.
  * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 2000, 01 MIPS Technologies, Inc.
  * Copyright (C) 2002, 2003, 2004, 2005, 2007  Maciej W. Rozycki
+ * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
  */
 #include <linux/bug.h>
 #include <linux/compiler.h>
@@ -60,9 +60,9 @@ extern void check_wait(void);
 extern asmlinkage void r4k_wait(void);
 extern asmlinkage void rollback_handle_int(void);
 extern asmlinkage void handle_int(void);
-extern asmlinkage void handle_tlbm(void);
-extern asmlinkage void handle_tlbl(void);
-extern asmlinkage void handle_tlbs(void);
+extern u32 handle_tlbl[];
+extern u32 handle_tlbs[];
+extern u32 handle_tlbm[];
 extern asmlinkage void handle_adel(void);
 extern asmlinkage void handle_ades(void);
 extern asmlinkage void handle_ibe(void);
@@ -83,10 +83,6 @@ extern asmlinkage void handle_dsp(void);
 extern asmlinkage void handle_mcheck(void);
 extern asmlinkage void handle_reserved(void);
 
-extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
-                                   struct mips_fpu_struct *ctx, int has_fpu,
-                                   void *__user *fault_addr);
-
 void (*board_be_init)(void);
 int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
 void (*board_nmi_handler_setup)(void);
@@ -482,6 +478,12 @@ asmlinkage void do_be(struct pt_regs *regs)
 #define SYNC   0x0000000f
 #define RDHWR  0x0000003b
 
+/*  microMIPS definitions   */
+#define MM_POOL32A_FUNC 0xfc00ffff
+#define MM_RDHWR        0x00006b3c
+#define MM_RS           0x001f0000
+#define MM_RT           0x03e00000
+
 /*
  * The ll_bit is cleared by r*_switch.S
  */
@@ -596,42 +598,62 @@ static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
  * Simulate trapping 'rdhwr' instructions to provide user accessible
  * registers not implemented in hardware.
  */
-static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
+static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
 {
        struct thread_info *ti = task_thread_info(current);
 
+       perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+                       1, regs, 0);
+       switch (rd) {
+       case 0:         /* CPU number */
+               regs->regs[rt] = smp_processor_id();
+               return 0;
+       case 1:         /* SYNCI length */
+               regs->regs[rt] = min(current_cpu_data.dcache.linesz,
+                                    current_cpu_data.icache.linesz);
+               return 0;
+       case 2:         /* Read count register */
+               regs->regs[rt] = read_c0_count();
+               return 0;
+       case 3:         /* Count register resolution */
+               switch (current_cpu_data.cputype) {
+               case CPU_20KC:
+               case CPU_25KF:
+                       regs->regs[rt] = 1;
+                       break;
+               default:
+                       regs->regs[rt] = 2;
+               }
+               return 0;
+       case 29:
+               regs->regs[rt] = ti->tp_value;
+               return 0;
+       default:
+               return -1;
+       }
+}
+
+static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
+{
        if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
                int rd = (opcode & RD) >> 11;
                int rt = (opcode & RT) >> 16;
-               perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
-                               1, regs, 0);
-               switch (rd) {
-               case 0:         /* CPU number */
-                       regs->regs[rt] = smp_processor_id();
-                       return 0;
-               case 1:         /* SYNCI length */
-                       regs->regs[rt] = min(current_cpu_data.dcache.linesz,
-                                            current_cpu_data.icache.linesz);
-                       return 0;
-               case 2:         /* Read count register */
-                       regs->regs[rt] = read_c0_count();
-                       return 0;
-               case 3:         /* Count register resolution */
-                       switch (current_cpu_data.cputype) {
-                       case CPU_20KC:
-                       case CPU_25KF:
-                               regs->regs[rt] = 1;
-                               break;
-                       default:
-                               regs->regs[rt] = 2;
-                       }
-                       return 0;
-               case 29:
-                       regs->regs[rt] = ti->tp_value;
-                       return 0;
-               default:
-                       return -1;
-               }
+
+               simulate_rdhwr(regs, rd, rt);
+               return 0;
+       }
+
+       /* Not ours.  */
+       return -1;
+}
+
+static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned short opcode)
+{
+       if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
+               int rd = (opcode & MM_RS) >> 16;
+               int rt = (opcode & MM_RT) >> 21;
+               simulate_rdhwr(regs, rd, rt);
+               return 0;
        }
 
        /* Not ours.  */
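Editor's note, for context: the userland sequence these handlers emulate is the TLS-pointer read that compilers emit. On CPUs without the ULR hardware register it traps to the kernel, and simulate_rdhwr() supplies ti->tp_value. A hedged sketch of that userland side:

        /* Sketch only: read the thread pointer the way userland does. */
        static inline unsigned long example_read_tls(void)
        {
                unsigned long tp;

                __asm__ __volatile__(
                        ".set   push\n\t"
                        ".set   mips32r2\n\t"
                        "rdhwr  %0, $29\n\t"
                        ".set   pop"
                        : "=r" (tp));
                return tp;
        }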
@@ -662,7 +684,7 @@ asmlinkage void do_ov(struct pt_regs *regs)
        force_sig_info(SIGFPE, &info, current);
 }
 
-static int process_fpemu_return(int sig, void __user *fault_addr)
+int process_fpemu_return(int sig, void __user *fault_addr)
 {
        if (sig == SIGSEGV || sig == SIGBUS) {
                struct siginfo si = {0};
@@ -813,9 +835,29 @@ static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
 asmlinkage void do_bp(struct pt_regs *regs)
 {
        unsigned int opcode, bcode;
-
-       if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
-               goto out_sigsegv;
+       unsigned long epc;
+       u16 instr[2];
+
+       if (get_isa16_mode(regs->cp0_epc)) {
+               /* Calculate EPC. */
+               epc = exception_epc(regs);
+               if (cpu_has_mmips) {
+                       if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) ||
+                           (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2)))))
+                               goto out_sigsegv;
+                       opcode = (instr[0] << 16) | instr[1];
+               } else {
+                       /* MIPS16e mode */
+                       if (__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)))
+                               goto out_sigsegv;
+                       bcode = (instr[0] >> 6) & 0x3f;
+                       do_trap_or_bp(regs, bcode, "Break");
+                       return;
+               }
+       } else {
+               if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
+                       goto out_sigsegv;
+       }
 
        /*
         * There is the ancient bug in the MIPS assemblers that the break
@@ -856,13 +898,22 @@ out_sigsegv:
 asmlinkage void do_tr(struct pt_regs *regs)
 {
        unsigned int opcode, tcode = 0;
+       u16 instr[2];
+       unsigned long epc = exception_epc(regs);
 
-       if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
-               goto out_sigsegv;
+       if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc))) ||
+               (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2))))
+                       goto out_sigsegv;
+       opcode = (instr[0] << 16) | instr[1];
 
        /* Immediate versions don't provide a code.  */
-       if (!(opcode & OPCODE))
-               tcode = ((opcode >> 6) & ((1 << 10) - 1));
+       if (!(opcode & OPCODE)) {
+               if (get_isa16_mode(regs->cp0_epc))
+                       /* microMIPS */
+                       tcode = (opcode >> 12) & 0x1f;
+               else
+                       tcode = ((opcode >> 6) & ((1 << 10) - 1));
+       }
 
        do_trap_or_bp(regs, tcode, "Trap");
        return;
@@ -875,6 +926,7 @@ asmlinkage void do_ri(struct pt_regs *regs)
 {
        unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
        unsigned long old_epc = regs->cp0_epc;
+       unsigned long old31 = regs->regs[31];
        unsigned int opcode = 0;
        int status = -1;
 
@@ -887,23 +939,37 @@ asmlinkage void do_ri(struct pt_regs *regs)
        if (unlikely(compute_return_epc(regs) < 0))
                return;
 
-       if (unlikely(get_user(opcode, epc) < 0))
-               status = SIGSEGV;
+       if (get_isa16_mode(regs->cp0_epc)) {
+               unsigned short mmop[2] = { 0 };
 
-       if (!cpu_has_llsc && status < 0)
-               status = simulate_llsc(regs, opcode);
+               if (unlikely(get_user(mmop[0], epc) < 0))
+                       status = SIGSEGV;
+               if (unlikely(get_user(mmop[1], epc) < 0))
+                       status = SIGSEGV;
+               opcode = (mmop[0] << 16) | mmop[1];
 
-       if (status < 0)
-               status = simulate_rdhwr(regs, opcode);
+               if (status < 0)
+                       status = simulate_rdhwr_mm(regs, opcode);
+       } else {
+               if (unlikely(get_user(opcode, epc) < 0))
+                       status = SIGSEGV;
 
-       if (status < 0)
-               status = simulate_sync(regs, opcode);
+               if (!cpu_has_llsc && status < 0)
+                       status = simulate_llsc(regs, opcode);
+
+               if (status < 0)
+                       status = simulate_rdhwr_normal(regs, opcode);
+
+               if (status < 0)
+                       status = simulate_sync(regs, opcode);
+       }
 
        if (status < 0)
                status = SIGILL;
 
        if (unlikely(status > 0)) {
                regs->cp0_epc = old_epc;                /* Undo skip-over.  */
+               regs->regs[31] = old31;
                force_sig(status, current);
        }
 }
@@ -973,7 +1039,7 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
 asmlinkage void do_cpu(struct pt_regs *regs)
 {
        unsigned int __user *epc;
-       unsigned long old_epc;
+       unsigned long old_epc, old31;
        unsigned int opcode;
        unsigned int cpid;
        int status;
@@ -987,26 +1053,41 @@ asmlinkage void do_cpu(struct pt_regs *regs)
        case 0:
                epc = (unsigned int __user *)exception_epc(regs);
                old_epc = regs->cp0_epc;
+               old31 = regs->regs[31];
                opcode = 0;
                status = -1;
 
                if (unlikely(compute_return_epc(regs) < 0))
                        return;
 
-               if (unlikely(get_user(opcode, epc) < 0))
-                       status = SIGSEGV;
+               if (get_isa16_mode(regs->cp0_epc)) {
+                       unsigned short mmop[2] = { 0 };
 
-               if (!cpu_has_llsc && status < 0)
-                       status = simulate_llsc(regs, opcode);
+                       if (unlikely(get_user(mmop[0], epc) < 0))
+                               status = SIGSEGV;
+                       if (unlikely(get_user(mmop[1], epc) < 0))
+                               status = SIGSEGV;
+                       opcode = (mmop[0] << 16) | mmop[1];
 
-               if (status < 0)
-                       status = simulate_rdhwr(regs, opcode);
+                       if (status < 0)
+                               status = simulate_rdhwr_mm(regs, opcode);
+               } else {
+                       if (unlikely(get_user(opcode, epc) < 0))
+                               status = SIGSEGV;
+
+                       if (!cpu_has_llsc && status < 0)
+                               status = simulate_llsc(regs, opcode);
+
+                       if (status < 0)
+                               status = simulate_rdhwr_normal(regs, opcode);
+               }
 
                if (status < 0)
                        status = SIGILL;
 
                if (unlikely(status > 0)) {
                        regs->cp0_epc = old_epc;        /* Undo skip-over.  */
+                       regs->regs[31] = old31;
                        force_sig(status, current);
                }
 
@@ -1320,7 +1401,7 @@ asmlinkage void cache_parity_error(void)
 void ejtag_exception_handler(struct pt_regs *regs)
 {
        const int field = 2 * sizeof(unsigned long);
-       unsigned long depc, old_epc;
+       unsigned long depc, old_epc, old_ra;
        unsigned int debug;
 
        printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
@@ -1335,10 +1416,12 @@ void ejtag_exception_handler(struct pt_regs *regs)
                 * calculation.
                 */
                old_epc = regs->cp0_epc;
+               old_ra = regs->regs[31];
                regs->cp0_epc = depc;
-               __compute_return_epc(regs);
+               compute_return_epc(regs);
                depc = regs->cp0_epc;
                regs->cp0_epc = old_epc;
+               regs->regs[31] = old_ra;
        } else
                depc += 4;
        write_c0_depc(depc);
@@ -1377,11 +1460,27 @@ unsigned long vi_handlers[64];
 void __init *set_except_vector(int n, void *addr)
 {
        unsigned long handler = (unsigned long) addr;
-       unsigned long old_handler = exception_handlers[n];
+       unsigned long old_handler;
+
+#ifdef CONFIG_CPU_MICROMIPS
+       /*
+        * Only the TLB handlers are cache aligned with an even
+        * address. All other handlers are on an odd address and
+        * require no modification. Without this fixup the CPU would
+        * drop back into MIPS32 mode when handling a TLB exception,
+        * and we must stay in microMIPS mode.
+        */
+       if (!(handler & 0x1))
+               handler |= 1;
+#endif
+       old_handler = xchg(&exception_handlers[n], handler);
 
-       exception_handlers[n] = handler;
        if (n == 0 && cpu_has_divec) {
+#ifdef CONFIG_CPU_MICROMIPS
+               unsigned long jump_mask = ~((1 << 27) - 1);
+#else
                unsigned long jump_mask = ~((1 << 28) - 1);
+#endif
                u32 *buf = (u32 *)(ebase + 0x200);
                unsigned int k0 = 26;
                if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
@@ -1397,7 +1496,7 @@ void __init *set_except_vector(int n, void *addr)
        return (void *)old_handler;
 }
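Two details in the set_except_vector() change above are easy to miss: under CONFIG_CPU_MICROMIPS the handler address gets its low bit set so the CPU stays in microMIPS mode when it branches there, and the divec fast path is only used when the handler lies in the region reachable by a single jump from ebase + 0x200 (a 2^27-byte window for microMIPS, 2^28 for classic MIPS32). A small user-space sketch of both checks, with made-up addresses:

#include <stdio.h>

int main(void)
{
	unsigned long ebase   = 0x80000000UL;	/* made-up exception base */
	unsigned long handler = 0x80012340UL;	/* made-up handler address */
	unsigned long jump_mask;
	int micromips = 1;

	/* Keep bit 0 of the handler address set so an even (cache-aligned)
	 * TLB handler is still entered in microMIPS mode. */
	if (micromips && !(handler & 0x1))
		handler |= 1;

	/* A jump can only reach targets sharing the upper address bits. */
	jump_mask = micromips ? ~((1UL << 27) - 1) : ~((1UL << 28) - 1);

	if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask))
		printf("handler %#lx reachable from ebase+0x200\n", handler);
	else
		printf("handler %#lx is outside the direct-jump window\n",
		       handler);
	return 0;
}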
 
-static asmlinkage void do_default_vi(void)
+static void do_default_vi(void)
 {
        show_regs(get_irq_regs());
        panic("Caught unexpected vectored interrupt.");
@@ -1408,17 +1507,18 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
        unsigned long handler;
        unsigned long old_handler = vi_handlers[n];
        int srssets = current_cpu_data.srsets;
-       u32 *w;
+       u16 *h;
        unsigned char *b;
 
        BUG_ON(!cpu_has_veic && !cpu_has_vint);
+       BUG_ON(n < 0 || n > 9);
 
        if (addr == NULL) {
                handler = (unsigned long) do_default_vi;
                srs = 0;
        } else
                handler = (unsigned long) addr;
-       vi_handlers[n] = (unsigned long) addr;
+       vi_handlers[n] = handler;
 
        b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
 
@@ -1437,9 +1537,8 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
        if (srs == 0) {
                /*
                 * If no shadow set is selected then use the default handler
-                * that does normal register saving and standard interrupt exit
+                * that does normal register saving and standard interrupt exit
                 */
-
                extern char except_vec_vi, except_vec_vi_lui;
                extern char except_vec_vi_ori, except_vec_vi_end;
                extern char rollback_except_vec_vi;
@@ -1452,11 +1551,20 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
                 * Status.IM bit to be masked before going there.
                 */
                extern char except_vec_vi_mori;
+#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
+               const int mori_offset = &except_vec_vi_mori - vec_start + 2;
+#else
                const int mori_offset = &except_vec_vi_mori - vec_start;
+#endif
 #endif /* CONFIG_MIPS_MT_SMTC */
-               const int handler_len = &except_vec_vi_end - vec_start;
+#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
+               const int lui_offset = &except_vec_vi_lui - vec_start + 2;
+               const int ori_offset = &except_vec_vi_ori - vec_start + 2;
+#else
                const int lui_offset = &except_vec_vi_lui - vec_start;
                const int ori_offset = &except_vec_vi_ori - vec_start;
+#endif
+               const int handler_len = &except_vec_vi_end - vec_start;
 
                if (handler_len > VECTORSPACING) {
                        /*
@@ -1466,30 +1574,44 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
                        panic("VECTORSPACING too small");
                }
 
-               memcpy(b, vec_start, handler_len);
+               set_handler(((unsigned long)b - ebase), vec_start,
+#ifdef CONFIG_CPU_MICROMIPS
+                               (handler_len - 1));
+#else
+                               handler_len);
+#endif
 #ifdef CONFIG_MIPS_MT_SMTC
                BUG_ON(n > 7);  /* Vector index %d exceeds SMTC maximum. */
 
-               w = (u32 *)(b + mori_offset);
-               *w = (*w & 0xffff0000) | (0x100 << n);
+               h = (u16 *)(b + mori_offset);
+               *h = (0x100 << n);
 #endif /* CONFIG_MIPS_MT_SMTC */
-               w = (u32 *)(b + lui_offset);
-               *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
-               w = (u32 *)(b + ori_offset);
-               *w = (*w & 0xffff0000) | ((u32)handler & 0xffff);
+               h = (u16 *)(b + lui_offset);
+               *h = (handler >> 16) & 0xffff;
+               h = (u16 *)(b + ori_offset);
+               *h = (handler & 0xffff);
                local_flush_icache_range((unsigned long)b,
                                         (unsigned long)(b+handler_len));
        }
        else {
                /*
-                * In other cases jump directly to the interrupt handler
-                *
-                * It is the handlers responsibility to save registers if required
-                * (eg hi/lo) and return from the exception using "eret"
+                * In other cases jump directly to the interrupt handler. It
+                * is the handler's responsibility to save registers if required
+                * (eg hi/lo) and return from the exception using "eret".
                 */
-               w = (u32 *)b;
-               *w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */
-               *w = 0;
+               u32 insn;
+
+               h = (u16 *)b;
+               /* j handler */
+#ifdef CONFIG_CPU_MICROMIPS
+               insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
+#else
+               insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
+#endif
+               h[0] = (insn >> 16) & 0xffff;
+               h[1] = insn & 0xffff;
+               h[2] = 0;
+               h[3] = 0;
                local_flush_icache_range((unsigned long)b,
                                         (unsigned long)(b+8));
        }
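When a dedicated handler is installed without a shadow register set, the code above now writes the jump as halfwords; the 32-bit encodings it builds are 0x08000000 | ((target & 0x0fffffff) >> 2) for a classic MIPS32 j and 0xd4000000 | ((target & 0x07ffffff) >> 1) for microMIPS. A sketch of that encoding and the halfword split, with a made-up handler address:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t handler = 0x80023456;	/* made-up handler address */
	uint16_t h[4] = { 0, 0, 0, 0 };
	int micromips = 0;
	uint32_t insn;

	if (micromips)
		insn = 0xd4000000 | ((handler & 0x07ffffff) >> 1);
	else
		insn = 0x08000000 | ((handler & 0x0fffffff) >> 2);

	/* Same split as in set_vi_srs_handler() above: the jump is stored
	 * as two halfwords, followed by two zero halfwords. */
	h[0] = (insn >> 16) & 0xffff;
	h[1] = insn & 0xffff;

	printf("j-encoding %08x -> %04x %04x %04x %04x\n",
	       insn, h[0], h[1], h[2], h[3]);
	return 0;
}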
@@ -1534,6 +1656,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
        unsigned int cpu = smp_processor_id();
        unsigned int status_set = ST0_CU0;
        unsigned int hwrena = cpu_hwrena_impl_bits;
+       unsigned long asid = 0;
 #ifdef CONFIG_MIPS_MT_SMTC
        int secondaryTC = 0;
        int bootTC = (cpu == 0);
@@ -1617,8 +1740,9 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
        }
 #endif /* CONFIG_MIPS_MT_SMTC */
 
-       if (!cpu_data[cpu].asid_cache)
-               cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
+       asid = ASID_FIRST_VERSION;
+       cpu_data[cpu].asid_cache = asid;
+       TLBMISS_HANDLER_SETUP();
 
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;
@@ -1648,7 +1772,11 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
 /* Install CPU exception handler */
 void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size)
 {
+#ifdef CONFIG_CPU_MICROMIPS
+       memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
+#else
        memcpy((void *)(ebase + offset), addr, size);
+#endif
        local_flush_icache_range(ebase + offset, ebase + offset + size);
 }
 
@@ -1682,8 +1810,9 @@ __setup("rdhwr_noopt", set_rdhwr_noopt);
 
 void __init trap_init(void)
 {
-       extern char except_vec3_generic, except_vec3_r4000;
+       extern char except_vec3_generic;
        extern char except_vec4;
+       extern char except_vec3_r4000;
        unsigned long i;
        int rollback;
 
@@ -1700,7 +1829,12 @@ void __init trap_init(void)
                ebase = (unsigned long)
                        __alloc_bootmem(size, 1 << fls(size), 0);
        } else {
-               ebase = CKSEG0;
+#ifdef CONFIG_KVM_GUEST
+#define KVM_GUEST_KSEG0     0x40000000
+               ebase = KVM_GUEST_KSEG0;
+#else
+               ebase = CKSEG0;
+#endif
                if (cpu_has_mips_r2)
                        ebase += (read_c0_ebase() & 0x3ffff000);
        }
@@ -1816,11 +1950,11 @@ void __init trap_init(void)
 
        if (cpu_has_vce)
                /* Special exception: R4[04]00 uses also the divec space. */
-               memcpy((void *)(ebase + 0x180), &except_vec3_r4000, 0x100);
+               set_handler(0x180, &except_vec3_r4000, 0x100);
        else if (cpu_has_4kex)
-               memcpy((void *)(ebase + 0x180), &except_vec3_generic, 0x80);
+               set_handler(0x180, &except_vec3_generic, 0x80);
        else
-               memcpy((void *)(ebase + 0x080), &except_vec3_generic, 0x80);
+               set_handler(0x080, &except_vec3_generic, 0x80);
 
        local_flush_icache_range(ebase, ebase + 0x400);
        flush_tlb_handlers();
index 6087a54c86a0dbc5e6e9a167659df9408a1dc3e1..203d8857070dd225f2d8fdea7bf986ad7a6560cc 100644 (file)
 #include <asm/branch.h>
 #include <asm/byteorder.h>
 #include <asm/cop2.h>
+#include <asm/fpu.h>
+#include <asm/fpu_emulator.h>
 #include <asm/inst.h>
 #include <asm/uaccess.h>
 
 #define STR(x) __STR(x)
 #define __STR(x)  #x
@@ -102,12 +106,332 @@ static u32 unaligned_action;
 #endif
 extern void show_registers(struct pt_regs *regs);
 
+#ifdef __BIG_ENDIAN
+#define     LoadHW(addr, value, res)  \
+               __asm__ __volatile__ (".set\tnoat\n"        \
+                       "1:\tlb\t%0, 0(%2)\n"               \
+                       "2:\tlbu\t$1, 1(%2)\n\t"            \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       "3:\t.set\tat\n\t"                  \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%1, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     LoadW(addr, value, res)   \
+               __asm__ __volatile__ (                      \
+                       "1:\tlwl\t%0, (%2)\n"               \
+                       "2:\tlwr\t%0, 3(%2)\n\t"            \
+                       "li\t%1, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%1, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     LoadHWU(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       ".set\tnoat\n"                      \
+                       "1:\tlbu\t%0, 0(%2)\n"              \
+                       "2:\tlbu\t$1, 1(%2)\n\t"            \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".set\tat\n\t"                      \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%1, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     LoadWU(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       "1:\tlwl\t%0, (%2)\n"               \
+                       "2:\tlwr\t%0, 3(%2)\n\t"            \
+                       "dsll\t%0, %0, 32\n\t"              \
+                       "dsrl\t%0, %0, 32\n\t"              \
+                       "li\t%1, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       "\t.section\t.fixup,\"ax\"\n\t"     \
+                       "4:\tli\t%1, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     LoadDW(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       "1:\tldl\t%0, (%2)\n"               \
+                       "2:\tldr\t%0, 7(%2)\n\t"            \
+                       "li\t%1, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       "\t.section\t.fixup,\"ax\"\n\t"     \
+                       "4:\tli\t%1, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     StoreHW(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       ".set\tnoat\n"                      \
+                       "1:\tsb\t%1, 1(%2)\n\t"             \
+                       "srl\t$1, %1, 0x8\n"                \
+                       "2:\tsb\t$1, 0(%2)\n\t"             \
+                       ".set\tat\n\t"                      \
+                       "li\t%0, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%0, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=r" (res)                        \
+                       : "r" (value), "r" (addr), "i" (-EFAULT));
+
+#define     StoreW(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       "1:\tswl\t%1,(%2)\n"                \
+                       "2:\tswr\t%1, 3(%2)\n\t"            \
+                       "li\t%0, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%0, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+               : "=r" (res)                                \
+               : "r" (value), "r" (addr), "i" (-EFAULT));
+
+#define     StoreDW(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       "1:\tsdl\t%1,(%2)\n"                \
+                       "2:\tsdr\t%1, 7(%2)\n\t"            \
+                       "li\t%0, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%0, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+               : "=r" (res)                                \
+               : "r" (value), "r" (addr), "i" (-EFAULT));
+#endif
+
+#ifdef __LITTLE_ENDIAN
+#define     LoadHW(addr, value, res)  \
+               __asm__ __volatile__ (".set\tnoat\n"        \
+                       "1:\tlb\t%0, 1(%2)\n"               \
+                       "2:\tlbu\t$1, 0(%2)\n\t"            \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       "3:\t.set\tat\n\t"                  \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%1, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     LoadW(addr, value, res)   \
+               __asm__ __volatile__ (                      \
+                       "1:\tlwl\t%0, 3(%2)\n"              \
+                       "2:\tlwr\t%0, (%2)\n\t"             \
+                       "li\t%1, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%1, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     LoadHWU(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       ".set\tnoat\n"                      \
+                       "1:\tlbu\t%0, 1(%2)\n"              \
+                       "2:\tlbu\t$1, 0(%2)\n\t"            \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".set\tat\n\t"                      \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%1, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     LoadWU(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       "1:\tlwl\t%0, 3(%2)\n"              \
+                       "2:\tlwr\t%0, (%2)\n\t"             \
+                       "dsll\t%0, %0, 32\n\t"              \
+                       "dsrl\t%0, %0, 32\n\t"              \
+                       "li\t%1, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       "\t.section\t.fixup,\"ax\"\n\t"     \
+                       "4:\tli\t%1, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     LoadDW(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       "1:\tldl\t%0, 7(%2)\n"              \
+                       "2:\tldr\t%0, (%2)\n\t"             \
+                       "li\t%1, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       "\t.section\t.fixup,\"ax\"\n\t"     \
+                       "4:\tli\t%1, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     StoreHW(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       ".set\tnoat\n"                      \
+                       "1:\tsb\t%1, 0(%2)\n\t"             \
+                       "srl\t$1,%1, 0x8\n"                 \
+                       "2:\tsb\t$1, 1(%2)\n\t"             \
+                       ".set\tat\n\t"                      \
+                       "li\t%0, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%0, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+                       : "=r" (res)                        \
+                       : "r" (value), "r" (addr), "i" (-EFAULT));
+
+#define     StoreW(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       "1:\tswl\t%1, 3(%2)\n"              \
+                       "2:\tswr\t%1, (%2)\n\t"             \
+                       "li\t%0, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%0, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+               : "=r" (res)                                \
+               : "r" (value), "r" (addr), "i" (-EFAULT));
+
+#define     StoreDW(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       "1:\tsdl\t%1, 7(%2)\n"              \
+                       "2:\tsdr\t%1, (%2)\n\t"             \
+                       "li\t%0, 0\n"                       \
+                       "3:\n\t"                            \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "4:\tli\t%0, %3\n\t"                \
+                       "j\t3b\n\t"                         \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 4b\n\t"              \
+                       STR(PTR)"\t2b, 4b\n\t"              \
+                       ".previous"                         \
+               : "=r" (res)                                \
+               : "r" (value), "r" (addr), "i" (-EFAULT));
+#endif
+
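+/*
+ * The LoadHW/LoadW/.../StoreDW macros above wrap the unaligned byte
+ * assembly in inline asm with .fixup/__ex_table entries, so a faulting
+ * user access jumps to the 4: label and leaves -EFAULT in res. As a
+ * rough behavioural model only (it assumes an in-memory buffer and
+ * ignores the exception-table machinery), the big-endian LoadHW boils
+ * down to the user-space sketch below:
+ *
+ *	#include <stdio.h>
+ *	#include <stdint.h>
+ *
+ *	// Behavioural sketch of big-endian LoadHW(addr, value, res): the
+ *	// first byte provides the sign-carrying high half, the second the
+ *	// low half, and res reports success. The real macro uses lb/lbu
+ *	// plus a fixup entry so a faulting load returns -EFAULT.
+ *	static void load_hw_be(const uint8_t *addr, long *value, int *res)
+ *	{
+ *		*value = (int16_t)((addr[0] << 8) | addr[1]);
+ *		*res = 0;
+ *	}
+ *
+ *	int main(void)
+ *	{
+ *		uint8_t bytes[2] = { 0xff, 0x10 };
+ *		long value;
+ *		int res;
+ *
+ *		load_hw_be(bytes, &value, &res);
+ *		printf("res=%d value=%ld\n", res, value);
+ *		return 0;
+ *	}
+ */
+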
 static void emulate_load_store_insn(struct pt_regs *regs,
        void __user *addr, unsigned int __user *pc)
 {
        union mips_instruction insn;
        unsigned long value;
        unsigned int res;
+       unsigned long origpc;
+       unsigned long orig31;
+       void __user *fault_addr = NULL;
+
+       origpc = (unsigned long)pc;
+       orig31 = regs->regs[31];
 
        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
 
@@ -117,22 +441,22 @@ static void emulate_load_store_insn(struct pt_regs *regs,
        __get_user(insn.word, pc);
 
        switch (insn.i_format.opcode) {
-       /*
-        * These are instructions that a compiler doesn't generate.  We
-        * can assume therefore that the code is MIPS-aware and
-        * really buggy.  Emulating these instructions would break the
-        * semantics anyway.
-        */
+               /*
+                * These are instructions that a compiler doesn't generate.  We
+                * can assume therefore that the code is MIPS-aware and
+                * really buggy.  Emulating these instructions would break the
+                * semantics anyway.
+                */
        case ll_op:
        case lld_op:
        case sc_op:
        case scd_op:
 
-       /*
-        * For these instructions the only way to create an address
-        * error is an attempted access to kernel/supervisor address
-        * space.
-        */
+               /*
+                * For these instructions the only way to create an address
+                * error is an attempted access to kernel/supervisor address
+                * space.
+                */
        case ldl_op:
        case ldr_op:
        case lwl_op:
@@ -146,36 +470,15 @@ static void emulate_load_store_insn(struct pt_regs *regs,
        case sb_op:
                goto sigbus;
 
-       /*
-        * The remaining opcodes are the ones that are really of interest.
-        */
+               /*
+                * The remaining opcodes are the ones that are really of
+                * interest.
+                */
        case lh_op:
                if (!access_ok(VERIFY_READ, addr, 2))
                        goto sigbus;
 
-               __asm__ __volatile__ (".set\tnoat\n"
-#ifdef __BIG_ENDIAN
-                       "1:\tlb\t%0, 0(%2)\n"
-                       "2:\tlbu\t$1, 1(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
-                       "1:\tlb\t%0, 1(%2)\n"
-                       "2:\tlbu\t$1, 0(%2)\n\t"
-#endif
-                       "sll\t%0, 0x8\n\t"
-                       "or\t%0, $1\n\t"
-                       "li\t%1, 0\n"
-                       "3:\t.set\tat\n\t"
-                       ".section\t.fixup,\"ax\"\n\t"
-                       "4:\tli\t%1, %3\n\t"
-                       "j\t3b\n\t"
-                       ".previous\n\t"
-                       ".section\t__ex_table,\"a\"\n\t"
-                       STR(PTR)"\t1b, 4b\n\t"
-                       STR(PTR)"\t2b, 4b\n\t"
-                       ".previous"
-                       : "=&r" (value), "=r" (res)
-                       : "r" (addr), "i" (-EFAULT));
+               LoadHW(addr, value, res);
                if (res)
                        goto fault;
                compute_return_epc(regs);
@@ -186,26 +489,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
                if (!access_ok(VERIFY_READ, addr, 4))
                        goto sigbus;
 
-               __asm__ __volatile__ (
-#ifdef __BIG_ENDIAN
-                       "1:\tlwl\t%0, (%2)\n"
-                       "2:\tlwr\t%0, 3(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
-                       "1:\tlwl\t%0, 3(%2)\n"
-                       "2:\tlwr\t%0, (%2)\n\t"
-#endif
-                       "li\t%1, 0\n"
-                       "3:\t.section\t.fixup,\"ax\"\n\t"
-                       "4:\tli\t%1, %3\n\t"
-                       "j\t3b\n\t"
-                       ".previous\n\t"
-                       ".section\t__ex_table,\"a\"\n\t"
-                       STR(PTR)"\t1b, 4b\n\t"
-                       STR(PTR)"\t2b, 4b\n\t"
-                       ".previous"
-                       : "=&r" (value), "=r" (res)
-                       : "r" (addr), "i" (-EFAULT));
+               LoadW(addr, value, res);
                if (res)
                        goto fault;
                compute_return_epc(regs);
@@ -216,30 +500,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
                if (!access_ok(VERIFY_READ, addr, 2))
                        goto sigbus;
 
-               __asm__ __volatile__ (
-                       ".set\tnoat\n"
-#ifdef __BIG_ENDIAN
-                       "1:\tlbu\t%0, 0(%2)\n"
-                       "2:\tlbu\t$1, 1(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
-                       "1:\tlbu\t%0, 1(%2)\n"
-                       "2:\tlbu\t$1, 0(%2)\n\t"
-#endif
-                       "sll\t%0, 0x8\n\t"
-                       "or\t%0, $1\n\t"
-                       "li\t%1, 0\n"
-                       "3:\t.set\tat\n\t"
-                       ".section\t.fixup,\"ax\"\n\t"
-                       "4:\tli\t%1, %3\n\t"
-                       "j\t3b\n\t"
-                       ".previous\n\t"
-                       ".section\t__ex_table,\"a\"\n\t"
-                       STR(PTR)"\t1b, 4b\n\t"
-                       STR(PTR)"\t2b, 4b\n\t"
-                       ".previous"
-                       : "=&r" (value), "=r" (res)
-                       : "r" (addr), "i" (-EFAULT));
+               LoadHWU(addr, value, res);
                if (res)
                        goto fault;
                compute_return_epc(regs);
@@ -258,28 +519,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
                if (!access_ok(VERIFY_READ, addr, 4))
                        goto sigbus;
 
-               __asm__ __volatile__ (
-#ifdef __BIG_ENDIAN
-                       "1:\tlwl\t%0, (%2)\n"
-                       "2:\tlwr\t%0, 3(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
-                       "1:\tlwl\t%0, 3(%2)\n"
-                       "2:\tlwr\t%0, (%2)\n\t"
-#endif
-                       "dsll\t%0, %0, 32\n\t"
-                       "dsrl\t%0, %0, 32\n\t"
-                       "li\t%1, 0\n"
-                       "3:\t.section\t.fixup,\"ax\"\n\t"
-                       "4:\tli\t%1, %3\n\t"
-                       "j\t3b\n\t"
-                       ".previous\n\t"
-                       ".section\t__ex_table,\"a\"\n\t"
-                       STR(PTR)"\t1b, 4b\n\t"
-                       STR(PTR)"\t2b, 4b\n\t"
-                       ".previous"
-                       : "=&r" (value), "=r" (res)
-                       : "r" (addr), "i" (-EFAULT));
+               LoadWU(addr, value, res);
                if (res)
                        goto fault;
                compute_return_epc(regs);
@@ -302,26 +542,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
                if (!access_ok(VERIFY_READ, addr, 8))
                        goto sigbus;
 
-               __asm__ __volatile__ (
-#ifdef __BIG_ENDIAN
-                       "1:\tldl\t%0, (%2)\n"
-                       "2:\tldr\t%0, 7(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
-                       "1:\tldl\t%0, 7(%2)\n"
-                       "2:\tldr\t%0, (%2)\n\t"
-#endif
-                       "li\t%1, 0\n"
-                       "3:\t.section\t.fixup,\"ax\"\n\t"
-                       "4:\tli\t%1, %3\n\t"
-                       "j\t3b\n\t"
-                       ".previous\n\t"
-                       ".section\t__ex_table,\"a\"\n\t"
-                       STR(PTR)"\t1b, 4b\n\t"
-                       STR(PTR)"\t2b, 4b\n\t"
-                       ".previous"
-                       : "=&r" (value), "=r" (res)
-                       : "r" (addr), "i" (-EFAULT));
+               LoadDW(addr, value, res);
                if (res)
                        goto fault;
                compute_return_epc(regs);
@@ -336,68 +557,22 @@ static void emulate_load_store_insn(struct pt_regs *regs,
                if (!access_ok(VERIFY_WRITE, addr, 2))
                        goto sigbus;
 
+               compute_return_epc(regs);
                value = regs->regs[insn.i_format.rt];
-               __asm__ __volatile__ (
-#ifdef __BIG_ENDIAN
-                       ".set\tnoat\n"
-                       "1:\tsb\t%1, 1(%2)\n\t"
-                       "srl\t$1, %1, 0x8\n"
-                       "2:\tsb\t$1, 0(%2)\n\t"
-                       ".set\tat\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
-                       ".set\tnoat\n"
-                       "1:\tsb\t%1, 0(%2)\n\t"
-                       "srl\t$1,%1, 0x8\n"
-                       "2:\tsb\t$1, 1(%2)\n\t"
-                       ".set\tat\n\t"
-#endif
-                       "li\t%0, 0\n"
-                       "3:\n\t"
-                       ".section\t.fixup,\"ax\"\n\t"
-                       "4:\tli\t%0, %3\n\t"
-                       "j\t3b\n\t"
-                       ".previous\n\t"
-                       ".section\t__ex_table,\"a\"\n\t"
-                       STR(PTR)"\t1b, 4b\n\t"
-                       STR(PTR)"\t2b, 4b\n\t"
-                       ".previous"
-                       : "=r" (res)
-                       : "r" (value), "r" (addr), "i" (-EFAULT));
+               StoreHW(addr, value, res);
                if (res)
                        goto fault;
-               compute_return_epc(regs);
                break;
 
        case sw_op:
                if (!access_ok(VERIFY_WRITE, addr, 4))
                        goto sigbus;
 
+               compute_return_epc(regs);
                value = regs->regs[insn.i_format.rt];
-               __asm__ __volatile__ (
-#ifdef __BIG_ENDIAN
-                       "1:\tswl\t%1,(%2)\n"
-                       "2:\tswr\t%1, 3(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
-                       "1:\tswl\t%1, 3(%2)\n"
-                       "2:\tswr\t%1, (%2)\n\t"
-#endif
-                       "li\t%0, 0\n"
-                       "3:\n\t"
-                       ".section\t.fixup,\"ax\"\n\t"
-                       "4:\tli\t%0, %3\n\t"
-                       "j\t3b\n\t"
-                       ".previous\n\t"
-                       ".section\t__ex_table,\"a\"\n\t"
-                       STR(PTR)"\t1b, 4b\n\t"
-                       STR(PTR)"\t2b, 4b\n\t"
-                       ".previous"
-               : "=r" (res)
-               : "r" (value), "r" (addr), "i" (-EFAULT));
+               StoreW(addr, value, res);
                if (res)
                        goto fault;
-               compute_return_epc(regs);
                break;
 
        case sd_op:
@@ -412,31 +587,11 @@ static void emulate_load_store_insn(struct pt_regs *regs,
                if (!access_ok(VERIFY_WRITE, addr, 8))
                        goto sigbus;
 
+               compute_return_epc(regs);
                value = regs->regs[insn.i_format.rt];
-               __asm__ __volatile__ (
-#ifdef __BIG_ENDIAN
-                       "1:\tsdl\t%1,(%2)\n"
-                       "2:\tsdr\t%1, 7(%2)\n\t"
-#endif
-#ifdef __LITTLE_ENDIAN
-                       "1:\tsdl\t%1, 7(%2)\n"
-                       "2:\tsdr\t%1, (%2)\n\t"
-#endif
-                       "li\t%0, 0\n"
-                       "3:\n\t"
-                       ".section\t.fixup,\"ax\"\n\t"
-                       "4:\tli\t%0, %3\n\t"
-                       "j\t3b\n\t"
-                       ".previous\n\t"
-                       ".section\t__ex_table,\"a\"\n\t"
-                       STR(PTR)"\t1b, 4b\n\t"
-                       STR(PTR)"\t2b, 4b\n\t"
-                       ".previous"
-               : "=r" (res)
-               : "r" (value), "r" (addr), "i" (-EFAULT));
+               StoreDW(addr, value, res);
                if (res)
                        goto fault;
-               compute_return_epc(regs);
                break;
 #endif /* CONFIG_64BIT */
 
@@ -447,10 +602,21 @@ static void emulate_load_store_insn(struct pt_regs *regs,
        case ldc1_op:
        case swc1_op:
        case sdc1_op:
-               /*
-                * I herewith declare: this does not happen.  So send SIGBUS.
-                */
-               goto sigbus;
+               die_if_kernel("Unaligned FP access in kernel code", regs);
+               BUG_ON(!used_math());
+               BUG_ON(!is_fpu_owner());
+
+               lose_fpu(1);    /* Save FPU state for the emulator. */
+               res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
+                                              &fault_addr);
+               own_fpu(1);     /* Restore FPU state. */
+
+               /* Signal if something went wrong. */
+               process_fpemu_return(res, fault_addr);
+
+               if (res == 0)
+                       break;
+               return;
 
        /*
         * COP2 is available to implementor for application specific use.
@@ -488,6 +654,9 @@ static void emulate_load_store_insn(struct pt_regs *regs,
        return;
 
 fault:
+       /* roll back jump/branch */
+       regs->cp0_epc = origpc;
+       regs->regs[31] = orig31;
        /* Did we have an exception handler installed? */
        if (fixup_exception(regs))
                return;
@@ -504,10 +673,881 @@ sigbus:
        return;
 
 sigill:
-       die_if_kernel("Unhandled kernel unaligned access or invalid instruction", regs);
+       die_if_kernel
+           ("Unhandled kernel unaligned access or invalid instruction", regs);
        force_sig(SIGILL, current);
 }
 
+/* Recode table from 16-bit register notation to 32-bit GPR. */
+const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
+
+/* Recode table from 16-bit STORE register notation to 32-bit GPR. */
+const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
+
+void emulate_load_store_microMIPS(struct pt_regs *regs, void __user * addr)
+{
+       unsigned long value;
+       unsigned int res;
+       int i;
+       unsigned int reg = 0, rvar;
+       unsigned long orig31;
+       u16 __user *pc16;
+       u16 halfword;
+       unsigned int word;
+       unsigned long origpc, contpc;
+       union mips_instruction insn;
+       struct mm_decoded_insn mminsn;
+       void __user *fault_addr = NULL;
+
+       origpc = regs->cp0_epc;
+       orig31 = regs->regs[31];
+
+       mminsn.micro_mips_mode = 1;
+
+       /*
+        * This load never faults.
+        */
+       pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
+       __get_user(halfword, pc16);
+       pc16++;
+       contpc = regs->cp0_epc + 2;
+       word = ((unsigned int)halfword << 16);
+       mminsn.pc_inc = 2;
+
+       if (!mm_insn_16bit(halfword)) {
+               __get_user(halfword, pc16);
+               pc16++;
+               contpc = regs->cp0_epc + 4;
+               mminsn.pc_inc = 4;
+               word |= halfword;
+       }
+       mminsn.insn = word;
+
+       if (get_user(halfword, pc16))
+               goto fault;
+       mminsn.next_pc_inc = 2;
+       word = ((unsigned int)halfword << 16);
+
+       if (!mm_insn_16bit(halfword)) {
+               pc16++;
+               if (get_user(halfword, pc16))
+                       goto fault;
+               mminsn.next_pc_inc = 4;
+               word |= halfword;
+       }
+       mminsn.next_insn = word;
+
+       insn = (union mips_instruction)(mminsn.insn);
+       if (mm_isBranchInstr(regs, mminsn, &contpc))
+               insn = (union mips_instruction)(mminsn.next_insn);
+
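+	/*
+	 * The fetch above reads the faulting instruction as one or two
+	 * halfwords: a microMIPS instruction is 16 or 32 bits wide, so
+	 * the first halfword decides whether a second one is appended
+	 * and whether epc advances by 2 or 4. A user-space sketch of
+	 * that step, with a hypothetical mm16() predicate standing in
+	 * for mm_insn_16bit() (the rule below is invented, illustration
+	 * only):
+	 *
+	 *	#include <stdio.h>
+	 *	#include <stdint.h>
+	 *	#include <stdbool.h>
+	 *
+	 *	static bool mm16(uint16_t halfword)
+	 *	{
+	 *		return (halfword & 0x8000) == 0;  // invented rule
+	 *	}
+	 *
+	 *	int main(void)
+	 *	{
+	 *		uint16_t stream[2] = { 0xfc42, 0x0010 };
+	 *		uint32_t word = (uint32_t)stream[0] << 16;
+	 *		unsigned int pc_inc = 2;
+	 *
+	 *		if (!mm16(stream[0])) {	// 32-bit: append 2nd half
+	 *			word |= stream[1];
+	 *			pc_inc = 4;
+	 *		}
+	 *		printf("word %08x, epc += %u\n", word, pc_inc);
+	 *		return 0;
+	 *	}
+	 */
+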
+       /*  Parse instruction to find what to do */
+
+       switch (insn.mm_i_format.opcode) {
+
+       case mm_pool32a_op:
+               switch (insn.mm_x_format.func) {
+               case mm_lwxs_op:
+                       reg = insn.mm_x_format.rd;
+                       goto loadW;
+               }
+
+               goto sigbus;
+
+       case mm_pool32b_op:
+               switch (insn.mm_m_format.func) {
+               case mm_lwp_func:
+                       reg = insn.mm_m_format.rd;
+                       if (reg == 31)
+                               goto sigbus;
+
+                       if (!access_ok(VERIFY_READ, addr, 8))
+                               goto sigbus;
+
+                       LoadW(addr, value, res);
+                       if (res)
+                               goto fault;
+                       regs->regs[reg] = value;
+                       addr += 4;
+                       LoadW(addr, value, res);
+                       if (res)
+                               goto fault;
+                       regs->regs[reg + 1] = value;
+                       goto success;
+
+               case mm_swp_func:
+                       reg = insn.mm_m_format.rd;
+                       if (reg == 31)
+                               goto sigbus;
+
+                       if (!access_ok(VERIFY_WRITE, addr, 8))
+                               goto sigbus;
+
+                       value = regs->regs[reg];
+                       StoreW(addr, value, res);
+                       if (res)
+                               goto fault;
+                       addr += 4;
+                       value = regs->regs[reg + 1];
+                       StoreW(addr, value, res);
+                       if (res)
+                               goto fault;
+                       goto success;
+
+               case mm_ldp_func:
+#ifdef CONFIG_64BIT
+                       reg = insn.mm_m_format.rd;
+                       if (reg == 31)
+                               goto sigbus;
+
+                       if (!access_ok(VERIFY_READ, addr, 16))
+                               goto sigbus;
+
+                       LoadDW(addr, value, res);
+                       if (res)
+                               goto fault;
+                       regs->regs[reg] = value;
+                       addr += 8;
+                       LoadDW(addr, value, res);
+                       if (res)
+                               goto fault;
+                       regs->regs[reg + 1] = value;
+                       goto success;
+#endif /* CONFIG_64BIT */
+
+                       goto sigill;
+
+               case mm_sdp_func:
+#ifdef CONFIG_64BIT
+                       reg = insn.mm_m_format.rd;
+                       if (reg == 31)
+                               goto sigbus;
+
+                       if (!access_ok(VERIFY_WRITE, addr, 16))
+                               goto sigbus;
+
+                       value = regs->regs[reg];
+                       StoreDW(addr, value, res);
+                       if (res)
+                               goto fault;
+                       addr += 8;
+                       value = regs->regs[reg + 1];
+                       StoreDW(addr, value, res);
+                       if (res)
+                               goto fault;
+                       goto success;
+#endif /* CONFIG_64BIT */
+
+                       goto sigill;
+
+               case mm_lwm32_func:
+                       reg = insn.mm_m_format.rd;
+                       rvar = reg & 0xf;
+                       if ((rvar > 9) || !reg)
+                               goto sigill;
+                       if (reg & 0x10) {
+                               if (!access_ok
+                                   (VERIFY_READ, addr, 4 * (rvar + 1)))
+                                       goto sigbus;
+                       } else {
+                               if (!access_ok(VERIFY_READ, addr, 4 * rvar))
+                                       goto sigbus;
+                       }
+                       if (rvar == 9)
+                               rvar = 8;
+                       for (i = 16; rvar; rvar--, i++) {
+                               LoadW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                               addr += 4;
+                               regs->regs[i] = value;
+                       }
+                       if ((reg & 0xf) == 9) {
+                               LoadW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                               addr += 4;
+                               regs->regs[30] = value;
+                       }
+                       if (reg & 0x10) {
+                               LoadW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                               regs->regs[31] = value;
+                       }
+                       goto success;
+
+               case mm_swm32_func:
+                       reg = insn.mm_m_format.rd;
+                       rvar = reg & 0xf;
+                       if ((rvar > 9) || !reg)
+                               goto sigill;
+                       if (reg & 0x10) {
+                               if (!access_ok
+                                   (VERIFY_WRITE, addr, 4 * (rvar + 1)))
+                                       goto sigbus;
+                       } else {
+                               if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
+                                       goto sigbus;
+                       }
+                       if (rvar == 9)
+                               rvar = 8;
+                       for (i = 16; rvar; rvar--, i++) {
+                               value = regs->regs[i];
+                               StoreW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                               addr += 4;
+                       }
+                       if ((reg & 0xf) == 9) {
+                               value = regs->regs[30];
+                               StoreW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                               addr += 4;
+                       }
+                       if (reg & 0x10) {
+                               value = regs->regs[31];
+                               StoreW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                       }
+                       goto success;
+
+               case mm_ldm_func:
+#ifdef CONFIG_64BIT
+                       reg = insn.mm_m_format.rd;
+                       rvar = reg & 0xf;
+                       if ((rvar > 9) || !reg)
+                               goto sigill;
+                       if (reg & 0x10) {
+                               if (!access_ok
+                                   (VERIFY_READ, addr, 8 * (rvar + 1)))
+                                       goto sigbus;
+                       } else {
+                               if (!access_ok(VERIFY_READ, addr, 8 * rvar))
+                                       goto sigbus;
+                       }
+                       if (rvar == 9)
+                               rvar = 8;
+
+                       for (i = 16; rvar; rvar--, i++) {
+                               LoadDW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                               addr += 4;
+                               regs->regs[i] = value;
+                       }
+                       if ((reg & 0xf) == 9) {
+                               LoadDW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                               addr += 8;
+                               regs->regs[30] = value;
+                       }
+                       if (reg & 0x10) {
+                               LoadDW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                               regs->regs[31] = value;
+                       }
+                       goto success;
+#endif /* CONFIG_64BIT */
+
+                       goto sigill;
+
+               case mm_sdm_func:
+#ifdef CONFIG_64BIT
+                       reg = insn.mm_m_format.rd;
+                       rvar = reg & 0xf;
+                       if ((rvar > 9) || !reg)
+                               goto sigill;
+                       if (reg & 0x10) {
+                               if (!access_ok
+                                   (VERIFY_WRITE, addr, 8 * (rvar + 1)))
+                                       goto sigbus;
+                       } else {
+                               if (!access_ok(VERIFY_WRITE, addr, 8 * rvar))
+                                       goto sigbus;
+                       }
+                       if (rvar == 9)
+                               rvar = 8;
+
+                       for (i = 16; rvar; rvar--, i++) {
+                               value = regs->regs[i];
+                               StoreDW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                               addr += 8;
+                       }
+                       if ((reg & 0xf) == 9) {
+                               value = regs->regs[30];
+                               StoreDW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                               addr += 8;
+                       }
+                       if (reg & 0x10) {
+                               value = regs->regs[31];
+                               StoreDW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                       }
+                       goto success;
+#endif /* CONFIG_64BIT */
+
+                       goto sigill;
+
+                       /*  LWC2, SWC2, LDC2, SDC2 are not serviced */
+               }
+
+               goto sigbus;
+
+       case mm_pool32c_op:
+               switch (insn.mm_m_format.func) {
+               case mm_lwu_func:
+                       reg = insn.mm_m_format.rd;
+                       goto loadWU;
+               }
+
+               /*  LL,SC,LLD,SCD are not serviced */
+               goto sigbus;
+
+       case mm_pool32f_op:
+               switch (insn.mm_x_format.func) {
+               case mm_lwxc1_func:
+               case mm_swxc1_func:
+               case mm_ldxc1_func:
+               case mm_sdxc1_func:
+                       goto fpu_emul;
+               }
+
+               goto sigbus;
+
+       case mm_ldc132_op:
+       case mm_sdc132_op:
+       case mm_lwc132_op:
+       case mm_swc132_op:
+fpu_emul:
+               /* roll back jump/branch */
+               regs->cp0_epc = origpc;
+               regs->regs[31] = orig31;
+
+               die_if_kernel("Unaligned FP access in kernel code", regs);
+               BUG_ON(!used_math());
+               BUG_ON(!is_fpu_owner());
+
+               lose_fpu(1);    /* save the FPU state for the emulator */
+               res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
+                                              &fault_addr);
+               own_fpu(1);     /* restore FPU state */
+
+               /* If something went wrong, signal */
+               process_fpemu_return(res, fault_addr);
+
+               if (res == 0)
+                       goto success;
+               return;
+
+       case mm_lh32_op:
+               reg = insn.mm_i_format.rt;
+               goto loadHW;
+
+       case mm_lhu32_op:
+               reg = insn.mm_i_format.rt;
+               goto loadHWU;
+
+       case mm_lw32_op:
+               reg = insn.mm_i_format.rt;
+               goto loadW;
+
+       case mm_sh32_op:
+               reg = insn.mm_i_format.rt;
+               goto storeHW;
+
+       case mm_sw32_op:
+               reg = insn.mm_i_format.rt;
+               goto storeW;
+
+       case mm_ld32_op:
+               reg = insn.mm_i_format.rt;
+               goto loadDW;
+
+       case mm_sd32_op:
+               reg = insn.mm_i_format.rt;
+               goto storeDW;
+
+       case mm_pool16c_op:
+               switch (insn.mm16_m_format.func) {
+               case mm_lwm16_op:
+                       reg = insn.mm16_m_format.rlist;
+                       rvar = reg + 1;
+                       if (!access_ok(VERIFY_READ, addr, 4 * rvar))
+                               goto sigbus;
+
+                       for (i = 16; rvar; rvar--, i++) {
+                               LoadW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                               addr += 4;
+                               regs->regs[i] = value;
+                       }
+                       LoadW(addr, value, res);
+                       if (res)
+                               goto fault;
+                       regs->regs[31] = value;
+
+                       goto success;
+
+               case mm_swm16_op:
+                       reg = insn.mm16_m_format.rlist;
+                       rvar = reg + 1;
+                       if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
+                               goto sigbus;
+
+                       for (i = 16; rvar; rvar--, i++) {
+                               value = regs->regs[i];
+                               StoreW(addr, value, res);
+                               if (res)
+                                       goto fault;
+                               addr += 4;
+                       }
+                       value = regs->regs[31];
+                       StoreW(addr, value, res);
+                       if (res)
+                               goto fault;
+
+                       goto success;
+
+               }
+
+               goto sigbus;
+
+       case mm_lhu16_op:
+               reg = reg16to32[insn.mm16_rb_format.rt];
+               goto loadHWU;
+
+       case mm_lw16_op:
+               reg = reg16to32[insn.mm16_rb_format.rt];
+               goto loadW;
+
+       case mm_sh16_op:
+               reg = reg16to32st[insn.mm16_rb_format.rt];
+               goto storeHW;
+
+       case mm_sw16_op:
+               reg = reg16to32st[insn.mm16_rb_format.rt];
+               goto storeW;
+
+       case mm_lwsp16_op:
+               reg = insn.mm16_r5_format.rt;
+               goto loadW;
+
+       case mm_swsp16_op:
+               reg = insn.mm16_r5_format.rt;
+               goto storeW;
+
+       case mm_lwgp16_op:
+               reg = reg16to32[insn.mm16_r3_format.rt];
+               goto loadW;
+
+       default:
+               goto sigill;
+       }
+
+loadHW:
+       if (!access_ok(VERIFY_READ, addr, 2))
+               goto sigbus;
+
+       LoadHW(addr, value, res);
+       if (res)
+               goto fault;
+       regs->regs[reg] = value;
+       goto success;
+
+loadHWU:
+       if (!access_ok(VERIFY_READ, addr, 2))
+               goto sigbus;
+
+       LoadHWU(addr, value, res);
+       if (res)
+               goto fault;
+       regs->regs[reg] = value;
+       goto success;
+
+loadW:
+       if (!access_ok(VERIFY_READ, addr, 4))
+               goto sigbus;
+
+       LoadW(addr, value, res);
+       if (res)
+               goto fault;
+       regs->regs[reg] = value;
+       goto success;
+
+loadWU:
+#ifdef CONFIG_64BIT
+       /*
+        * A 32-bit kernel might be running on a 64-bit processor.  But
+        * if we're on a 32-bit processor and an i-cache incoherency
+        * or race makes us see a 64-bit instruction here the sdl/sdr
+        * would blow up, so for now we don't handle unaligned 64-bit
+        * instructions on 32-bit kernels.
+        */
+       if (!access_ok(VERIFY_READ, addr, 4))
+               goto sigbus;
+
+       LoadWU(addr, value, res);
+       if (res)
+               goto fault;
+       regs->regs[reg] = value;
+       goto success;
+#endif /* CONFIG_64BIT */
+
+       /* Cannot handle 64-bit instructions in 32-bit kernel */
+       goto sigill;
+
+loadDW:
+#ifdef CONFIG_64BIT
+       /*
+        * A 32-bit kernel might be running on a 64-bit processor.  But
+        * if we're on a 32-bit processor and an i-cache incoherency
+        * or race makes us see a 64-bit instruction here the sdl/sdr
+        * would blow up, so for now we don't handle unaligned 64-bit
+        * instructions on 32-bit kernels.
+        */
+       if (!access_ok(VERIFY_READ, addr, 8))
+               goto sigbus;
+
+       LoadDW(addr, value, res);
+       if (res)
+               goto fault;
+       regs->regs[reg] = value;
+       goto success;
+#endif /* CONFIG_64BIT */
+
+       /* Cannot handle 64-bit instructions in 32-bit kernel */
+       goto sigill;
+
+storeHW:
+       if (!access_ok(VERIFY_WRITE, addr, 2))
+               goto sigbus;
+
+       value = regs->regs[reg];
+       StoreHW(addr, value, res);
+       if (res)
+               goto fault;
+       goto success;
+
+storeW:
+       if (!access_ok(VERIFY_WRITE, addr, 4))
+               goto sigbus;
+
+       value = regs->regs[reg];
+       StoreW(addr, value, res);
+       if (res)
+               goto fault;
+       goto success;
+
+storeDW:
+#ifdef CONFIG_64BIT
+       /*
+        * A 32-bit kernel might be running on a 64-bit processor.  But
+        * if we're on a 32-bit processor and an i-cache incoherency
+        * or race makes us see a 64-bit instruction here the sdl/sdr
+        * would blow up, so for now we don't handle unaligned 64-bit
+        * instructions on 32-bit kernels.
+        */
+       if (!access_ok(VERIFY_WRITE, addr, 8))
+               goto sigbus;
+
+       value = regs->regs[reg];
+       StoreDW(addr, value, res);
+       if (res)
+               goto fault;
+       goto success;
+#endif /* CONFIG_64BIT */
+
+       /* Cannot handle 64-bit instructions in 32-bit kernel */
+       goto sigill;
+
+success:
+       regs->cp0_epc = contpc; /* advance or branch */
+
+#ifdef CONFIG_DEBUG_FS
+       unaligned_instructions++;
+#endif
+       return;
+
+fault:
+       /* roll back jump/branch */
+       regs->cp0_epc = origpc;
+       regs->regs[31] = orig31;
+       /* Did we have an exception handler installed? */
+       if (fixup_exception(regs))
+               return;
+
+       die_if_kernel("Unhandled kernel unaligned access", regs);
+       force_sig(SIGSEGV, current);
+
+       return;
+
+sigbus:
+       die_if_kernel("Unhandled kernel unaligned access", regs);
+       force_sig(SIGBUS, current);
+
+       return;
+
+sigill:
+       die_if_kernel("Unhandled kernel unaligned access or invalid instruction",
+                     regs);
+       force_sig(SIGILL, current);
+}
+
+static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
+{
+       unsigned long value;
+       unsigned int res;
+       int reg;
+       unsigned long orig31;
+       u16 __user *pc16;
+       unsigned long origpc;
+       union mips16e_instruction mips16inst, oldinst;
+
+       origpc = regs->cp0_epc;
+       orig31 = regs->regs[31];
+       pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
+       /*
+        * This load never faults.
+        */
+       __get_user(mips16inst.full, pc16);
+       oldinst = mips16inst;
+
+       /* skip EXTEND instruction */
+       if (mips16inst.ri.opcode == MIPS16e_extend_op) {
+               pc16++;
+               __get_user(mips16inst.full, pc16);
+       } else if (delay_slot(regs)) {
+               /*  skip jump instructions */
+               /*  JAL/JALX are 32 bits but have OPCODE in first short int */
+               if (mips16inst.ri.opcode == MIPS16e_jal_op)
+                       pc16++;
+               pc16++;
+               if (get_user(mips16inst.full, pc16))
+                       goto sigbus;
+       }
+
+       switch (mips16inst.ri.opcode) {
+       case MIPS16e_i64_op:    /* I64 or RI64 instruction */
+               switch (mips16inst.i64.func) {  /* I64/RI64 func field check */
+               case MIPS16e_ldpc_func:
+               case MIPS16e_ldsp_func:
+                       reg = reg16to32[mips16inst.ri64.ry];
+                       goto loadDW;
+
+               case MIPS16e_sdsp_func:
+                       reg = reg16to32[mips16inst.ri64.ry];
+                       goto writeDW;
+
+               case MIPS16e_sdrasp_func:
+                       reg = 29;       /* GPRSP */
+                       goto writeDW;
+               }
+
+               goto sigbus;
+
+       case MIPS16e_swsp_op:
+       case MIPS16e_lwpc_op:
+       case MIPS16e_lwsp_op:
+               reg = reg16to32[mips16inst.ri.rx];
+               break;
+
+       case MIPS16e_i8_op:
+               if (mips16inst.i8.func != MIPS16e_swrasp_func)
+                       goto sigbus;
+               reg = 29;       /* GPRSP */
+               break;
+
+       default:
+               reg = reg16to32[mips16inst.rri.ry];
+               break;
+       }
+
+       switch (mips16inst.ri.opcode) {
+
+       case MIPS16e_lb_op:
+       case MIPS16e_lbu_op:
+       case MIPS16e_sb_op:
+               goto sigbus;
+
+       case MIPS16e_lh_op:
+               if (!access_ok(VERIFY_READ, addr, 2))
+                       goto sigbus;
+
+               LoadHW(addr, value, res);
+               if (res)
+                       goto fault;
+               MIPS16e_compute_return_epc(regs, &oldinst);
+               regs->regs[reg] = value;
+               break;
+
+       case MIPS16e_lhu_op:
+               if (!access_ok(VERIFY_READ, addr, 2))
+                       goto sigbus;
+
+               LoadHWU(addr, value, res);
+               if (res)
+                       goto fault;
+               MIPS16e_compute_return_epc(regs, &oldinst);
+               regs->regs[reg] = value;
+               break;
+
+       case MIPS16e_lw_op:
+       case MIPS16e_lwpc_op:
+       case MIPS16e_lwsp_op:
+               if (!access_ok(VERIFY_READ, addr, 4))
+                       goto sigbus;
+
+               LoadW(addr, value, res);
+               if (res)
+                       goto fault;
+               MIPS16e_compute_return_epc(regs, &oldinst);
+               regs->regs[reg] = value;
+               break;
+
+       case MIPS16e_lwu_op:
+#ifdef CONFIG_64BIT
+               /*
+                * A 32-bit kernel might be running on a 64-bit processor.  But
+                * if we're on a 32-bit processor and an i-cache incoherency
+                * or race makes us see a 64-bit instruction here the sdl/sdr
+                * would blow up, so for now we don't handle unaligned 64-bit
+                * instructions on 32-bit kernels.
+                */
+               if (!access_ok(VERIFY_READ, addr, 4))
+                       goto sigbus;
+
+               LoadWU(addr, value, res);
+               if (res)
+                       goto fault;
+               MIPS16e_compute_return_epc(regs, &oldinst);
+               regs->regs[reg] = value;
+               break;
+#endif /* CONFIG_64BIT */
+
+               /* Cannot handle 64-bit instructions in 32-bit kernel */
+               goto sigill;
+
+       case MIPS16e_ld_op:
+loadDW:
+#ifdef CONFIG_64BIT
+               /*
+                * A 32-bit kernel might be running on a 64-bit processor.  But
+                * if we're on a 32-bit processor and an i-cache incoherency
+                * or race makes us see a 64-bit instruction here the sdl/sdr
+                * would blow up, so for now we don't handle unaligned 64-bit
+                * instructions on 32-bit kernels.
+                */
+               if (!access_ok(VERIFY_READ, addr, 8))
+                       goto sigbus;
+
+               LoadDW(addr, value, res);
+               if (res)
+                       goto fault;
+               MIPS16e_compute_return_epc(regs, &oldinst);
+               regs->regs[reg] = value;
+               break;
+#endif /* CONFIG_64BIT */
+
+               /* Cannot handle 64-bit instructions in 32-bit kernel */
+               goto sigill;
+
+       case MIPS16e_sh_op:
+               if (!access_ok(VERIFY_WRITE, addr, 2))
+                       goto sigbus;
+
+               MIPS16e_compute_return_epc(regs, &oldinst);
+               value = regs->regs[reg];
+               StoreHW(addr, value, res);
+               if (res)
+                       goto fault;
+               break;
+
+       case MIPS16e_sw_op:
+       case MIPS16e_swsp_op:
+       case MIPS16e_i8_op:     /* actually - MIPS16e_swrasp_func */
+               if (!access_ok(VERIFY_WRITE, addr, 4))
+                       goto sigbus;
+
+               MIPS16e_compute_return_epc(regs, &oldinst);
+               value = regs->regs[reg];
+               StoreW(addr, value, res);
+               if (res)
+                       goto fault;
+               break;
+
+       case MIPS16e_sd_op:
+writeDW:
+#ifdef CONFIG_64BIT
+               /*
+                * A 32-bit kernel might be running on a 64-bit processor.  But
+                * if we're on a 32-bit processor and an i-cache incoherency
+                * or race makes us see a 64-bit instruction here the sdl/sdr
+                * would blow up, so for now we don't handle unaligned 64-bit
+                * instructions on 32-bit kernels.
+                */
+               if (!access_ok(VERIFY_WRITE, addr, 8))
+                       goto sigbus;
+
+               MIPS16e_compute_return_epc(regs, &oldinst);
+               value = regs->regs[reg];
+               StoreDW(addr, value, res);
+               if (res)
+                       goto fault;
+               break;
+#endif /* CONFIG_64BIT */
+
+               /* Cannot handle 64-bit instructions in 32-bit kernel */
+               goto sigill;
+
+       default:
+               /*
+                * Pheeee...  We encountered a yet unknown instruction or
+                * cache coherence problem.  Die sucker, die ...
+                */
+               goto sigill;
+       }
+
+#ifdef CONFIG_DEBUG_FS
+       unaligned_instructions++;
+#endif
+
+       return;
+
+fault:
+       /* roll back jump/branch */
+       regs->cp0_epc = origpc;
+       regs->regs[31] = orig31;
+       /* Did we have an exception handler installed? */
+       if (fixup_exception(regs))
+               return;
+
+       die_if_kernel("Unhandled kernel unaligned access", regs);
+       force_sig(SIGSEGV, current);
+
+       return;
+
+sigbus:
+       die_if_kernel("Unhandled kernel unaligned access", regs);
+       force_sig(SIGBUS, current);
+
+       return;
+
+sigill:
+       die_if_kernel("Unhandled kernel unaligned access or invalid instruction",
+                     regs);
+       force_sig(SIGILL, current);
+}
 asmlinkage void do_ade(struct pt_regs *regs)
 {
        unsigned int __user *pc;
@@ -517,23 +1557,62 @@ asmlinkage void do_ade(struct pt_regs *regs)
                        1, regs, regs->cp0_badvaddr);
        /*
         * Did we catch a fault trying to load an instruction?
-        * Or are we running in MIPS16 mode?
         */
-       if ((regs->cp0_badvaddr == regs->cp0_epc) || (regs->cp0_epc & 0x1))
+       if (regs->cp0_badvaddr == regs->cp0_epc)
                goto sigbus;
 
-       pc = (unsigned int __user *) exception_epc(regs);
        if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
                goto sigbus;
        if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
                goto sigbus;
-       else if (unaligned_action == UNALIGNED_ACTION_SHOW)
-               show_registers(regs);
 
        /*
         * Do branch emulation only if we didn't forward the exception.
         * This is all so but ugly ...
         */
+
+       /*
+        * Are we running in microMIPS mode?
+        */
+       if (get_isa16_mode(regs->cp0_epc)) {
+               /*
+                * Did we catch a fault trying to load an instruction in
+                * 16-bit mode?
+                */
+               if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
+                       goto sigbus;
+               if (unaligned_action == UNALIGNED_ACTION_SHOW)
+                       show_registers(regs);
+
+               if (cpu_has_mmips) {
+                       seg = get_fs();
+                       if (!user_mode(regs))
+                               set_fs(KERNEL_DS);
+                       emulate_load_store_microMIPS(regs,
+                               (void __user *)regs->cp0_badvaddr);
+                       set_fs(seg);
+
+                       return;
+               }
+
+               if (cpu_has_mips16) {
+                       seg = get_fs();
+                       if (!user_mode(regs))
+                               set_fs(KERNEL_DS);
+                       emulate_load_store_MIPS16e(regs,
+                               (void __user *)regs->cp0_badvaddr);
+                       set_fs(seg);
+
+                       return;
+               }
+
+               goto sigbus;
+       }
+
+       if (unaligned_action == UNALIGNED_ACTION_SHOW)
+               show_registers(regs);
+       pc = (unsigned int __user *)exception_epc(regs);
+
        seg = get_fs();
        if (!user_mode(regs))
                set_fs(KERNEL_DS);
diff --git a/arch/mips/kvm/00README.txt b/arch/mips/kvm/00README.txt
new file mode 100644 (file)
index 0000000..51617e4
--- /dev/null
@@ -0,0 +1,31 @@
+KVM/MIPS Trap & Emulate Release Notes
+=====================================
+
+(1) KVM/MIPS should support MIPS32R2 and beyond. It has been tested on the following platforms:
+    Malta Board with FPGA based 34K
+    Sigma Designs TangoX board with a 24K based 8654 SoC.
+    Malta Board with 74K @ 1GHz
+
+(2) Both Guest kernel and Guest Userspace execute in UM.
+    Guest User address space:   0x00000000 -> 0x40000000
+    Guest Kernel Unmapped:      0x40000000 -> 0x60000000
+    Guest Kernel Mapped:        0x60000000 -> 0x80000000
+
+    Guest Usermode virtual memory is limited to 1GB.
+
+(3) 16K Page Sizes: Both Host Kernel and Guest Kernel should have the same page size, currently at least 16K.
+    Note that due to cache aliasing issues, 4K page sizes are NOT supported.
+
+(4) No HugeTLB Support
+    Both the host kernel and Guest kernel should have the page size set to 16K.
+    This will be implemented in a future release.
+
+(5) KVM/MIPS does not have support for SMP Guests
+    Linux-3.7-rc2 based SMP guest hangs due to the following code sequence in the generated TLB handlers:
+       LL/TLBP/SC.  Since the TLBP instruction causes a trap, the reservation gets cleared
+       when we ERET back to the guest. This causes the guest to hang in an infinite loop.
+       This will be fixed in a future release.
+
+(6) Use Host FPU
+    Currently KVM/MIPS emulates a 24K CPU without an FPU.
+    This will be fixed in a future release.
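The layout in note (2) splits the guest's usermode view into three fixed
ranges. As a rough illustration, a guest virtual address could be classified
as below; this sketch is not part of the patch, the enum and helper names are
made up, and only the range boundaries come from the note above.

enum kvm_mips_guest_seg {
	GUEST_USER_SEG,		/* 0x00000000 - 0x3fffffff, mapped         */
	GUEST_KSEG_UNMAPPED,	/* 0x40000000 - 0x5fffffff, unmapped       */
	GUEST_KSEG_MAPPED,	/* 0x60000000 - 0x7fffffff, mapped         */
	GUEST_SEG_INVALID,	/* 0x80000000 and up: outside guest UM     */
};

static enum kvm_mips_guest_seg classify_gva(unsigned long gva)
{
	if (gva < 0x40000000UL)
		return GUEST_USER_SEG;
	if (gva < 0x60000000UL)
		return GUEST_KSEG_UNMAPPED;
	if (gva < 0x80000000UL)
		return GUEST_KSEG_MAPPED;
	return GUEST_SEG_INVALID;
}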
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
new file mode 100644 (file)
index 0000000..2c15590
--- /dev/null
@@ -0,0 +1,49 @@
+#
+# KVM configuration
+#
+source "virt/kvm/Kconfig"
+
+menuconfig VIRTUALIZATION
+       bool "Virtualization"
+       depends on HAVE_KVM
+       ---help---
+         Say Y here to get to see options for using your Linux host to run
+         other operating systems inside virtual machines (guests).
+         This option alone does not add any kernel code.
+
+         If you say N, all options in this submenu will be skipped and disabled.
+
+if VIRTUALIZATION
+
+config KVM
+       tristate "Kernel-based Virtual Machine (KVM) support"
+       depends on HAVE_KVM
+       select PREEMPT_NOTIFIERS
+       select ANON_INODES
+       select KVM_MMIO
+       ---help---
+         Support for hosting Guest kernels.
+         Currently supported on MIPS32 processors.
+
+config KVM_MIPS_DYN_TRANS
+       bool "KVM/MIPS: Dynamic binary translation to reduce traps"
+       depends on KVM
+       ---help---
+         When running in Trap & Emulate mode, patch privileged
+         instructions to reduce the number of traps.
+
+         If unsure, say Y.
+
+config KVM_MIPS_DEBUG_COP0_COUNTERS
+       bool "Maintain counters for COP0 accesses"
+       depends on KVM
+       ---help---
+         Maintain statistics for Guest COP0 accesses.
+         A histogram of COP0 accesses is printed when the VM is
+         shut down.
+
+         If unsure, say N.
+
+source drivers/vhost/Kconfig
+
+endif # VIRTUALIZATION
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile
new file mode 100644 (file)
index 0000000..78d87bb
--- /dev/null
@@ -0,0 +1,13 @@
+# Makefile for KVM support for MIPS
+#
+
+common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
+
+EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
+
+kvm-objs := $(common-objs) kvm_mips.o kvm_mips_emul.o kvm_locore.o \
+           kvm_mips_int.o kvm_mips_stats.o kvm_mips_commpage.o \
+           kvm_mips_dyntrans.o kvm_trap_emul.o
+
+obj-$(CONFIG_KVM)      += kvm.o
+obj-y                  += kvm_cb.o kvm_tlb.o
diff --git a/arch/mips/kvm/kvm_cb.c b/arch/mips/kvm/kvm_cb.c
new file mode 100644 (file)
index 0000000..313c2e3
--- /dev/null
@@ -0,0 +1,14 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Yann Le Du <ledu@kymasys.com>
+ */
+
+#include <linux/export.h>
+#include <linux/kvm_host.h>
+
+struct kvm_mips_callbacks *kvm_mips_callbacks;
+EXPORT_SYMBOL(kvm_mips_callbacks);
diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/kvm_locore.S
new file mode 100644 (file)
index 0000000..dca2aa6
--- /dev/null
@@ -0,0 +1,650 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* Main entry point for the guest, exception handling.
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+#include <asm/asm-offsets.h>
+
+
+#define _C_LABEL(x)     x
+#define MIPSX(name)     mips32_ ## name
+#define CALLFRAME_SIZ   32
+
+/*
+ * VECTOR
+ *  exception vector entrypoint
+ */
+#define VECTOR(x, regmask)      \
+    .ent    _C_LABEL(x),0;      \
+    EXPORT(x);
+
+#define VECTOR_END(x)      \
+    EXPORT(x);
+
+/* Overload, Danger Will Robinson!! */
+#define PT_HOST_ASID        PT_BVADDR
+#define PT_HOST_USERLOCAL   PT_EPC
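+/* PT_HOST_ASID/PT_HOST_USERLOCAL overlay the BadVAddr and EPC slots of the
+ * host pt_regs frame: the entry path stores the host ASID and DDATA_LO
+ * (UserLocal) there, and __kvm_mips_return_to_host restores them.
+ */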
+
+#define CP0_DDATA_LO        $28,3
+#define CP0_EBASE           $15,1
+
+#define CP0_INTCTL          $12,1
+#define CP0_SRSCTL          $12,2
+#define CP0_SRSMAP          $12,3
+#define CP0_HWRENA          $7,0
+
+/* Resume Flags */
+#define RESUME_FLAG_HOST        (1<<1)  /* Resume host? */
+
+#define RESUME_GUEST            0
+#define RESUME_HOST             RESUME_FLAG_HOST
+
+/*
+ * __kvm_mips_vcpu_run: entry point to the guest
+ * a0: run
+ * a1: vcpu
+ */
+
+FEXPORT(__kvm_mips_vcpu_run)
+    .set    push
+    .set    noreorder
+    .set    noat
+
+    /* k0/k1 not being used in host kernel context */
+       addiu           k1,sp, -PT_SIZE
+    LONG_S         $0, PT_R0(k1)
+    LONG_S             $1, PT_R1(k1)
+    LONG_S             $2, PT_R2(k1)
+    LONG_S             $3, PT_R3(k1)
+
+    LONG_S             $4, PT_R4(k1)
+    LONG_S             $5, PT_R5(k1)
+    LONG_S             $6, PT_R6(k1)
+    LONG_S             $7, PT_R7(k1)
+
+    LONG_S             $8,  PT_R8(k1)
+    LONG_S             $9,  PT_R9(k1)
+    LONG_S             $10, PT_R10(k1)
+    LONG_S             $11, PT_R11(k1)
+    LONG_S             $12, PT_R12(k1)
+    LONG_S             $13, PT_R13(k1)
+    LONG_S             $14, PT_R14(k1)
+    LONG_S             $15, PT_R15(k1)
+    LONG_S             $16, PT_R16(k1)
+    LONG_S             $17, PT_R17(k1)
+
+    LONG_S             $18, PT_R18(k1)
+    LONG_S             $19, PT_R19(k1)
+    LONG_S             $20, PT_R20(k1)
+    LONG_S             $21, PT_R21(k1)
+    LONG_S             $22, PT_R22(k1)
+    LONG_S             $23, PT_R23(k1)
+    LONG_S             $24, PT_R24(k1)
+    LONG_S             $25, PT_R25(k1)
+
+       /* XXXKYMA k0/k1 not saved, not being used if we got here through an ioctl() */
+
+    LONG_S             $28, PT_R28(k1)
+    LONG_S             $29, PT_R29(k1)
+    LONG_S             $30, PT_R30(k1)
+    LONG_S             $31, PT_R31(k1)
+
+    /* Save hi/lo */
+       mflo            v0
+       LONG_S          v0, PT_LO(k1)
+       mfhi            v1
+       LONG_S          v1, PT_HI(k1)
+
+       /* Save host status */
+       mfc0            v0, CP0_STATUS
+       LONG_S          v0, PT_STATUS(k1)
+
+       /* Save host ASID, shove it into the BVADDR location */
+       mfc0            v1,CP0_ENTRYHI
+       andi            v1, 0xff
+       LONG_S          v1, PT_HOST_ASID(k1)
+
+    /* Save DDATA_LO, will be used to store pointer to vcpu */
+    mfc0        v1, CP0_DDATA_LO
+    LONG_S      v1, PT_HOST_USERLOCAL(k1)
+
+    /* DDATA_LO has pointer to vcpu */
+    mtc0        a1,CP0_DDATA_LO
+
+    /* Offset into vcpu->arch */
+       addiu           k1, a1, VCPU_HOST_ARCH
+
+    /* Save the host stack to VCPU, used for exception processing when we exit from the Guest */
+    LONG_S      sp, VCPU_HOST_STACK(k1)
+
+    /* Save the kernel gp as well */
+    LONG_S      gp, VCPU_HOST_GP(k1)
+
+       /* Setup status register for running the guest in UM, interrupts are disabled */
+       li                      k0,(ST0_EXL | KSU_USER| ST0_BEV)
+       mtc0            k0,CP0_STATUS
+    ehb
+
+    /* load up the new EBASE */
+    LONG_L      k0, VCPU_GUEST_EBASE(k1)
+    mtc0        k0,CP0_EBASE
+
+    /* Now that the new EBASE has been loaded, unset BEV, set interrupt mask as it was
+     * but make sure that timer interrupts are enabled
+     */
+    li          k0,(ST0_EXL | KSU_USER | ST0_IE)
+    andi        v0, v0, ST0_IM
+    or          k0, k0, v0
+    mtc0        k0,CP0_STATUS
+    ehb
+
+
+       /* Set Guest EPC */
+       LONG_L          t0, VCPU_PC(k1)
+       mtc0            t0, CP0_EPC
+
+FEXPORT(__kvm_mips_load_asid)
+    /* Set the ASID for the Guest Kernel */
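+    /* Note: the first addiu below sits in the bltz branch delay slot and
+     * therefore always executes; when the branch falls through (user
+     * address) the second addiu overwrites t1 with the user ASID base.
+     */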
+    sll         t0, t0, 1                       /* with kseg0 @ 0x40000000, kernel */
+                                                /* addresses shift to 0x80000000 */
+    bltz        t0, 1f                          /* If kernel */
+       addiu       t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
+    addiu       t1, k1, VCPU_GUEST_USER_ASID    /* else user */
+1:
+    /* t1: contains the base of the ASID array, need to get the cpu id  */
+    LONG_L      t2, TI_CPU($28)             /* smp_processor_id */
+    sll         t2, t2, 2                   /* x4 */
+    addu        t3, t1, t2
+    LONG_L      k0, (t3)
+    andi        k0, k0, 0xff
+       mtc0            k0,CP0_ENTRYHI
+    ehb
+
+    /* Disable RDHWR access */
+    mtc0    zero,  CP0_HWRENA
+
+    /* Now load up the Guest Context from VCPU */
+    LONG_L             $1, VCPU_R1(k1)
+    LONG_L             $2, VCPU_R2(k1)
+    LONG_L             $3, VCPU_R3(k1)
+
+    LONG_L             $4, VCPU_R4(k1)
+    LONG_L             $5, VCPU_R5(k1)
+    LONG_L             $6, VCPU_R6(k1)
+    LONG_L             $7, VCPU_R7(k1)
+
+    LONG_L             $8,  VCPU_R8(k1)
+    LONG_L             $9,  VCPU_R9(k1)
+    LONG_L             $10, VCPU_R10(k1)
+    LONG_L             $11, VCPU_R11(k1)
+    LONG_L             $12, VCPU_R12(k1)
+    LONG_L             $13, VCPU_R13(k1)
+    LONG_L             $14, VCPU_R14(k1)
+    LONG_L             $15, VCPU_R15(k1)
+    LONG_L             $16, VCPU_R16(k1)
+    LONG_L             $17, VCPU_R17(k1)
+    LONG_L             $18, VCPU_R18(k1)
+    LONG_L             $19, VCPU_R19(k1)
+    LONG_L             $20, VCPU_R20(k1)
+    LONG_L             $21, VCPU_R21(k1)
+    LONG_L             $22, VCPU_R22(k1)
+    LONG_L             $23, VCPU_R23(k1)
+    LONG_L             $24, VCPU_R24(k1)
+    LONG_L             $25, VCPU_R25(k1)
+
+    /* k0/k1 loaded up later */
+
+    LONG_L             $28, VCPU_R28(k1)
+    LONG_L             $29, VCPU_R29(k1)
+    LONG_L             $30, VCPU_R30(k1)
+    LONG_L             $31, VCPU_R31(k1)
+
+    /* Restore hi/lo */
+       LONG_L          k0, VCPU_LO(k1)
+       mtlo            k0
+
+       LONG_L          k0, VCPU_HI(k1)
+       mthi            k0
+
+FEXPORT(__kvm_mips_load_k0k1)
+       /* Restore the guest's k0/k1 registers */
+    LONG_L             k0, VCPU_R26(k1)
+    LONG_L             k1, VCPU_R27(k1)
+
+    /* Jump to guest */
+       eret
+       .set    pop
+
+VECTOR(MIPSX(exception), unknown)
+/*
+ * Find out what mode we came from and jump to the proper handler.
+ */
+    .set    push
+       .set    noat
+    .set    noreorder
+    mtc0    k0, CP0_ERROREPC    #01: Save guest k0
+    ehb                         #02:
+
+    mfc0    k0, CP0_EBASE       #02: Get EBASE
+    srl     k0, k0, 10          #03: Get rid of CPUNum
+    sll     k0, k0, 10          #04
+    LONG_S  k1, 0x3000(k0)      #05: Save k1 @ offset 0x3000
+    addiu   k0, k0, 0x2000      #06: Exception handler is installed @ offset 0x2000
+       j       k0                                      #07: jump to the function
+       nop                                             #08: branch delay slot
+       .set    pop
+VECTOR_END(MIPSX(exceptionEnd))
+.end MIPSX(exception)
+
+/*
+ * Generic Guest exception handler. We end up here when the guest
+ * does something that causes a trap to kernel mode.
+ *
+ */
+NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
+    .set    push
+    .set    noat
+    .set    noreorder
+
+    /* Get the VCPU pointer from DDATA_LO */
+    mfc0        k1, CP0_DDATA_LO
+       addiu           k1, k1, VCPU_HOST_ARCH
+
+    /* Start saving Guest context to VCPU */
+    LONG_S  $0, VCPU_R0(k1)
+    LONG_S  $1, VCPU_R1(k1)
+    LONG_S  $2, VCPU_R2(k1)
+    LONG_S  $3, VCPU_R3(k1)
+    LONG_S  $4, VCPU_R4(k1)
+    LONG_S  $5, VCPU_R5(k1)
+    LONG_S  $6, VCPU_R6(k1)
+    LONG_S  $7, VCPU_R7(k1)
+    LONG_S  $8, VCPU_R8(k1)
+    LONG_S  $9, VCPU_R9(k1)
+    LONG_S  $10, VCPU_R10(k1)
+    LONG_S  $11, VCPU_R11(k1)
+    LONG_S  $12, VCPU_R12(k1)
+    LONG_S  $13, VCPU_R13(k1)
+    LONG_S  $14, VCPU_R14(k1)
+    LONG_S  $15, VCPU_R15(k1)
+    LONG_S  $16, VCPU_R16(k1)
+    LONG_S  $17, VCPU_R17(k1)
+    LONG_S  $18, VCPU_R18(k1)
+    LONG_S  $19, VCPU_R19(k1)
+    LONG_S  $20, VCPU_R20(k1)
+    LONG_S  $21, VCPU_R21(k1)
+    LONG_S  $22, VCPU_R22(k1)
+    LONG_S  $23, VCPU_R23(k1)
+    LONG_S  $24, VCPU_R24(k1)
+    LONG_S  $25, VCPU_R25(k1)
+
+    /* Guest k0/k1 saved later */
+
+    LONG_S  $28, VCPU_R28(k1)
+    LONG_S  $29, VCPU_R29(k1)
+    LONG_S  $30, VCPU_R30(k1)
+    LONG_S  $31, VCPU_R31(k1)
+
+    /* We need to save hi/lo and restore them on
+     * the way out
+     */
+    mfhi    t0
+    LONG_S  t0, VCPU_HI(k1)
+
+    mflo    t0
+    LONG_S  t0, VCPU_LO(k1)
+
+    /* Finally save guest k0/k1 to VCPU */
+    mfc0    t0, CP0_ERROREPC
+    LONG_S  t0, VCPU_R26(k1)
+
+    /* Get GUEST k1 and save it in VCPU */
+    la      t1, ~0x2ff
+    mfc0    t0, CP0_EBASE
+    and     t0, t0, t1
+    LONG_L  t0, 0x3000(t0)
+    LONG_S  t0, VCPU_R27(k1)
+
+    /* Now that context has been saved, we can use other registers */
+
+    /* Restore vcpu */
+    mfc0        a1, CP0_DDATA_LO
+    move        s1, a1
+
+   /* Restore run (vcpu->run) */
+    LONG_L      a0, VCPU_RUN(a1)
+    /* Save pointer to run in s0, will be saved by the compiler */
+    move        s0, a0
+
+
+    /* Save Host level EPC, BadVaddr and Cause to VCPU, useful to process the exception */
+    mfc0    k0,CP0_EPC
+    LONG_S  k0, VCPU_PC(k1)
+
+    mfc0    k0, CP0_BADVADDR
+    LONG_S  k0, VCPU_HOST_CP0_BADVADDR(k1)
+
+    mfc0    k0, CP0_CAUSE
+    LONG_S  k0, VCPU_HOST_CP0_CAUSE(k1)
+
+    mfc0    k0, CP0_ENTRYHI
+    LONG_S  k0, VCPU_HOST_ENTRYHI(k1)
+
+    /* Now restore the host state just enough to run the handlers */
+
+    /* Switch EBASE to the one used by Linux */
+    /* load up the host EBASE */
+    mfc0        v0, CP0_STATUS
+
+    .set at
+       or          k0, v0, ST0_BEV
+    .set noat
+
+    mtc0        k0, CP0_STATUS
+    ehb
+
+    LONG_L      k0, VCPU_HOST_EBASE(k1)
+    mtc0        k0,CP0_EBASE
+
+
+    /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
+    .set at
+       and         v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
+    or          v0, v0, ST0_CU0
+    .set noat
+    mtc0        v0, CP0_STATUS
+    ehb
+
+    /* Load up host GP */
+    LONG_L  gp, VCPU_HOST_GP(k1)
+
+    /* Need a stack before we can jump to "C" */
+    LONG_L  sp, VCPU_HOST_STACK(k1)
+
+    /* Saved host state */
+    addiu   sp,sp, -PT_SIZE
+
+    /* XXXKYMA do we need to load the host ASID, maybe not because the
+     * kernel entries are marked GLOBAL, need to verify
+     */
+
+    /* Restore host DDATA_LO */
+    LONG_L      k0, PT_HOST_USERLOCAL(sp)
+    mtc0        k0, CP0_DDATA_LO
+
+    /* Restore RDHWR access */
+    la      k0, 0x2000000F
+    mtc0    k0,  CP0_HWRENA
+
+    /* Jump to handler */
+FEXPORT(__kvm_mips_jump_to_handler)
+    /* XXXKYMA: not sure if this is safe, how large is the stack?? */
+    /* Now jump to the kvm_mips_handle_exit() to see if we can deal with this in the kernel */
+    la          t9,kvm_mips_handle_exit
+    jalr.hb     t9
+    addiu       sp,sp, -CALLFRAME_SIZ           /* BD Slot */
+
+    /* Return from handler Make sure interrupts are disabled */
+    di
+    ehb
+
+    /* XXXKYMA: k0/k1 could have been blown away if we processed an exception
+     * while we were handling the exception from the guest, reload k1
+     */
+    move        k1, s1
+       addiu           k1, k1, VCPU_HOST_ARCH
+
+    /* Check return value, should tell us if we are returning to the host (handle I/O etc)
+     * or resuming the guest
+     */
+    andi        t0, v0, RESUME_HOST
+    bnez        t0, __kvm_mips_return_to_host
+    nop
+
+__kvm_mips_return_to_guest:
+    /* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */
+    mtc0        s1, CP0_DDATA_LO
+
+    /* Load up the Guest EBASE to minimize the window where BEV is set */
+    LONG_L      t0, VCPU_GUEST_EBASE(k1)
+
+    /* Switch EBASE back to the one used by KVM */
+    mfc0        v1, CP0_STATUS
+    .set at
+       or          k0, v1, ST0_BEV
+    .set noat
+    mtc0        k0, CP0_STATUS
+    ehb
+    mtc0        t0,CP0_EBASE
+
+    /* Setup status register for running guest in UM */
+    .set at
+    or     v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
+    and     v1, v1, ~ST0_CU0
+    .set noat
+    mtc0    v1, CP0_STATUS
+    ehb
+
+
+       /* Set Guest EPC */
+       LONG_L          t0, VCPU_PC(k1)
+       mtc0            t0, CP0_EPC
+
+    /* Set the ASID for the Guest Kernel */
+    sll         t0, t0, 1                       /* with kseg0 @ 0x40000000, kernel */
+                                                /* addresses shift to 0x80000000 */
+    bltz        t0, 1f                          /* If kernel */
+       addiu       t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
+    addiu       t1, k1, VCPU_GUEST_USER_ASID    /* else user */
+1:
+    /* t1: contains the base of the ASID array, need to get the cpu id  */
+    LONG_L      t2, TI_CPU($28)             /* smp_processor_id */
+    sll         t2, t2, 2                   /* x4 */
+    addu        t3, t1, t2
+    LONG_L      k0, (t3)
+    andi        k0, k0, 0xff
+       mtc0            k0,CP0_ENTRYHI
+    ehb
+
+    /* Disable RDHWR access */
+    mtc0    zero,  CP0_HWRENA
+
+    /* load the guest context from VCPU and return */
+    LONG_L  $0, VCPU_R0(k1)
+    LONG_L  $1, VCPU_R1(k1)
+    LONG_L  $2, VCPU_R2(k1)
+    LONG_L  $3, VCPU_R3(k1)
+    LONG_L  $4, VCPU_R4(k1)
+    LONG_L  $5, VCPU_R5(k1)
+    LONG_L  $6, VCPU_R6(k1)
+    LONG_L  $7, VCPU_R7(k1)
+    LONG_L  $8, VCPU_R8(k1)
+    LONG_L  $9, VCPU_R9(k1)
+    LONG_L  $10, VCPU_R10(k1)
+    LONG_L  $11, VCPU_R11(k1)
+    LONG_L  $12, VCPU_R12(k1)
+    LONG_L  $13, VCPU_R13(k1)
+    LONG_L  $14, VCPU_R14(k1)
+    LONG_L  $15, VCPU_R15(k1)
+    LONG_L  $16, VCPU_R16(k1)
+    LONG_L  $17, VCPU_R17(k1)
+    LONG_L  $18, VCPU_R18(k1)
+    LONG_L  $19, VCPU_R19(k1)
+    LONG_L  $20, VCPU_R20(k1)
+    LONG_L  $21, VCPU_R21(k1)
+    LONG_L  $22, VCPU_R22(k1)
+    LONG_L  $23, VCPU_R23(k1)
+    LONG_L  $24, VCPU_R24(k1)
+    LONG_L  $25, VCPU_R25(k1)
+
+    /* k0/k1 loaded later */
+    LONG_L  $28, VCPU_R28(k1)
+    LONG_L  $29, VCPU_R29(k1)
+    LONG_L  $30, VCPU_R30(k1)
+    LONG_L  $31, VCPU_R31(k1)
+
+FEXPORT(__kvm_mips_skip_guest_restore)
+    LONG_L  k0, VCPU_HI(k1)
+    mthi    k0
+
+    LONG_L  k0, VCPU_LO(k1)
+    mtlo    k0
+
+    LONG_L  k0, VCPU_R26(k1)
+    LONG_L  k1, VCPU_R27(k1)
+
+    eret
+
+__kvm_mips_return_to_host:
+    /* EBASE is already pointing to Linux */
+    LONG_L  k1, VCPU_HOST_STACK(k1)
+       addiu   k1,k1, -PT_SIZE
+
+    /* Restore host DDATA_LO */
+    LONG_L      k0, PT_HOST_USERLOCAL(k1)
+    mtc0        k0, CP0_DDATA_LO
+
+    /* Restore host ASID */
+    LONG_L      k0, PT_HOST_ASID(sp)
+    andi        k0, 0xff
+    mtc0        k0,CP0_ENTRYHI
+    ehb
+
+    /* Load context saved on the host stack */
+    LONG_L  $0, PT_R0(k1)
+    LONG_L  $1, PT_R1(k1)
+
+    /* r2/v0 is the return code, shift it down by 2 (arithmetic) to recover the err code  */
+    sra     k0, v0, 2
+    move    $2, k0
+
+    LONG_L  $3, PT_R3(k1)
+    LONG_L  $4, PT_R4(k1)
+    LONG_L  $5, PT_R5(k1)
+    LONG_L  $6, PT_R6(k1)
+    LONG_L  $7, PT_R7(k1)
+    LONG_L  $8, PT_R8(k1)
+    LONG_L  $9, PT_R9(k1)
+    LONG_L  $10, PT_R10(k1)
+    LONG_L  $11, PT_R11(k1)
+    LONG_L  $12, PT_R12(k1)
+    LONG_L  $13, PT_R13(k1)
+    LONG_L  $14, PT_R14(k1)
+    LONG_L  $15, PT_R15(k1)
+    LONG_L  $16, PT_R16(k1)
+    LONG_L  $17, PT_R17(k1)
+    LONG_L  $18, PT_R18(k1)
+    LONG_L  $19, PT_R19(k1)
+    LONG_L  $20, PT_R20(k1)
+    LONG_L  $21, PT_R21(k1)
+    LONG_L  $22, PT_R22(k1)
+    LONG_L  $23, PT_R23(k1)
+    LONG_L  $24, PT_R24(k1)
+    LONG_L  $25, PT_R25(k1)
+
+    /* Host k0/k1 were not saved */
+
+    LONG_L  $28, PT_R28(k1)
+    LONG_L  $29, PT_R29(k1)
+    LONG_L  $30, PT_R30(k1)
+
+    LONG_L  k0, PT_HI(k1)
+    mthi    k0
+
+    LONG_L  k0, PT_LO(k1)
+    mtlo    k0
+
+    /* Restore RDHWR access */
+    la      k0, 0x2000000F
+    mtc0    k0,  CP0_HWRENA
+
+
+    /* Restore RA, which is the address we will return to */
+    LONG_L  ra, PT_R31(k1)
+    j       ra
+    nop
+
+    .set    pop
+VECTOR_END(MIPSX(GuestExceptionEnd))
+.end MIPSX(GuestException)
+
+MIPSX(exceptions):
+       ####
+       ##### The exception handlers.
+       #####
+       .word _C_LABEL(MIPSX(GuestException))   #  0
+       .word _C_LABEL(MIPSX(GuestException))   #  1
+       .word _C_LABEL(MIPSX(GuestException))   #  2
+       .word _C_LABEL(MIPSX(GuestException))   #  3
+       .word _C_LABEL(MIPSX(GuestException))   #  4
+       .word _C_LABEL(MIPSX(GuestException))   #  5
+       .word _C_LABEL(MIPSX(GuestException))   #  6
+       .word _C_LABEL(MIPSX(GuestException))   #  7
+       .word _C_LABEL(MIPSX(GuestException))   #  8
+       .word _C_LABEL(MIPSX(GuestException))   #  9
+       .word _C_LABEL(MIPSX(GuestException))   # 10
+       .word _C_LABEL(MIPSX(GuestException))   # 11
+       .word _C_LABEL(MIPSX(GuestException))   # 12
+       .word _C_LABEL(MIPSX(GuestException))   # 13
+       .word _C_LABEL(MIPSX(GuestException))   # 14
+       .word _C_LABEL(MIPSX(GuestException))   # 15
+       .word _C_LABEL(MIPSX(GuestException))   # 16
+       .word _C_LABEL(MIPSX(GuestException))   # 17
+       .word _C_LABEL(MIPSX(GuestException))   # 18
+       .word _C_LABEL(MIPSX(GuestException))   # 19
+       .word _C_LABEL(MIPSX(GuestException))   # 20
+       .word _C_LABEL(MIPSX(GuestException))   # 21
+       .word _C_LABEL(MIPSX(GuestException))   # 22
+       .word _C_LABEL(MIPSX(GuestException))   # 23
+       .word _C_LABEL(MIPSX(GuestException))   # 24
+       .word _C_LABEL(MIPSX(GuestException))   # 25
+       .word _C_LABEL(MIPSX(GuestException))   # 26
+       .word _C_LABEL(MIPSX(GuestException))   # 27
+       .word _C_LABEL(MIPSX(GuestException))   # 28
+       .word _C_LABEL(MIPSX(GuestException))   # 29
+       .word _C_LABEL(MIPSX(GuestException))   # 30
+       .word _C_LABEL(MIPSX(GuestException))   # 31
+
+
+/* This routine makes changes to the instruction stream visible to the hardware.
+ * It should be called after the instruction stream is written.
+ * On return, the new instructions are effective.
+ * Inputs:
+ * a0 = Start address of new instruction stream
+ * a1 = Size, in bytes, of new instruction stream
+ */
+
+#define HW_SYNCI_Step       $1
+LEAF(MIPSX(SyncICache))
+    .set    push
+       .set    mips32r2
+    beq     a1, zero, 20f
+    nop
+    addu    a1, a0, a1
+    rdhwr   v0, HW_SYNCI_Step
+    beq     v0, zero, 20f
+    nop
+
+10:
+    synci   0(a0)
+    addu    a0, a0, v0
+    sltu    v1, a0, a1
+    bne     v1, zero, 10b
+    nop
+    sync
+20:
+    jr.hb   ra
+    nop
+    .set pop
+END(MIPSX(SyncICache))
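The return-to-host path above recovers the C handler's error code with an
arithmetic shift ("sra k0, v0, 2") and tests the resume decision with
"andi t0, v0, RESUME_HOST", which implies that kvm_mips_handle_exit() packs a
signed error code above two low flag bits. A minimal sketch of that convention
follows, assuming kernel-style (two's-complement) shift behaviour; the helper
names are hypothetical, and only the RESUME_* values come from the defines at
the top of this file.

#define RESUME_FLAG_HOST	(1 << 1)	/* Resume host? */
#define RESUME_GUEST		0
#define RESUME_HOST		RESUME_FLAG_HOST

/* Pack a signed error code above the resume flag bits (bits 1:0). */
static int pack_exit_code(int err, int resume)
{
	return (err << 2) | resume;
}

/* What "sra k0, v0, 2" recovers on the way back to the host. */
static int unpack_err(int ret)
{
	return ret >> 2;	/* arithmetic shift preserves the sign of err */
}

/* What "andi t0, v0, RESUME_HOST" / "bnez" decide after the handler returns. */
static int resumes_host(int ret)
{
	return ret & RESUME_HOST;
}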
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
new file mode 100644 (file)
index 0000000..e0dad02
--- /dev/null
@@ -0,0 +1,958 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: MIPS specific KVM APIs
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+
+#include <linux/kvm_host.h>
+
+#include "kvm_mips_int.h"
+#include "kvm_mips_comm.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#ifndef VECTORSPACING
+#define VECTORSPACING 0x100    /* for EI/VI mode */
+#endif
+
+#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
+struct kvm_stats_debugfs_item debugfs_entries[] = {
+       { "wait", VCPU_STAT(wait_exits) },
+       { "cache", VCPU_STAT(cache_exits) },
+       { "signal", VCPU_STAT(signal_exits) },
+       { "interrupt", VCPU_STAT(int_exits) },
+       { "cop_unusable", VCPU_STAT(cop_unusable_exits) },
+       { "tlbmod", VCPU_STAT(tlbmod_exits) },
+       { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits) },
+       { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits) },
+       { "addrerr_st", VCPU_STAT(addrerr_st_exits) },
+       { "addrerr_ld", VCPU_STAT(addrerr_ld_exits) },
+       { "syscall", VCPU_STAT(syscall_exits) },
+       { "resvd_inst", VCPU_STAT(resvd_inst_exits) },
+       { "break_inst", VCPU_STAT(break_inst_exits) },
+       { "flush_dcache", VCPU_STAT(flush_dcache_exits) },
+       { "halt_wakeup", VCPU_STAT(halt_wakeup) },
+       {NULL}
+};
+
+static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
+{
+       int i;
+       for_each_possible_cpu(i) {
+               vcpu->arch.guest_kernel_asid[i] = 0;
+               vcpu->arch.guest_user_asid[i] = 0;
+       }
+       return 0;
+}
+
+gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
+{
+       return gfn;
+}
+
+/* XXXKYMA: We are simulating a processor that has the WII bit set in Config7, so we
+ * are "runnable" if interrupts are pending
+ */
+int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
+{
+       return !!(vcpu->arch.pending_exceptions);
+}
+
+int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
+{
+       return 1;
+}
+
+int kvm_arch_hardware_enable(void *garbage)
+{
+       return 0;
+}
+
+void kvm_arch_hardware_disable(void *garbage)
+{
+}
+
+int kvm_arch_hardware_setup(void)
+{
+       return 0;
+}
+
+void kvm_arch_hardware_unsetup(void)
+{
+}
+
+void kvm_arch_check_processor_compat(void *rtn)
+{
+       int *r = (int *)rtn;
+       *r = 0;
+       return;
+}
+
+static void kvm_mips_init_tlbs(struct kvm *kvm)
+{
+       unsigned long wired;
+
+       /* Add a wired entry to the TLB, it is used to map the commpage to the Guest kernel */
+       wired = read_c0_wired();
+       write_c0_wired(wired + 1);
+       mtc0_tlbw_hazard();
+       kvm->arch.commpage_tlb = wired;
+
+       kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
+                 kvm->arch.commpage_tlb);
+}
+
+static void kvm_mips_init_vm_percpu(void *arg)
+{
+       struct kvm *kvm = (struct kvm *)arg;
+
+       kvm_mips_init_tlbs(kvm);
+       kvm_mips_callbacks->vm_init(kvm);
+
+}
+
+int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
+{
+       if (atomic_inc_return(&kvm_mips_instance) == 1) {
+               kvm_info("%s: 1st KVM instance, setup host TLB parameters\n",
+                        __func__);
+               on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
+       }
+
+
+       return 0;
+}
+
+void kvm_mips_free_vcpus(struct kvm *kvm)
+{
+       unsigned int i;
+       struct kvm_vcpu *vcpu;
+
+       /* Put the pages we reserved for the guest pmap */
+       for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
+               if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
+                       kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
+       }
+
+       if (kvm->arch.guest_pmap)
+               kfree(kvm->arch.guest_pmap);
+
+       kvm_for_each_vcpu(i, vcpu, kvm) {
+               kvm_arch_vcpu_free(vcpu);
+       }
+
+       mutex_lock(&kvm->lock);
+
+       for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
+               kvm->vcpus[i] = NULL;
+
+       atomic_set(&kvm->online_vcpus, 0);
+
+       mutex_unlock(&kvm->lock);
+}
+
+void kvm_arch_sync_events(struct kvm *kvm)
+{
+}
+
+static void kvm_mips_uninit_tlbs(void *arg)
+{
+       /* Restore wired count */
+       write_c0_wired(0);
+       mtc0_tlbw_hazard();
+       /* Clear out all the TLBs */
+       kvm_local_flush_tlb_all();
+}
+
+void kvm_arch_destroy_vm(struct kvm *kvm)
+{
+       kvm_mips_free_vcpus(kvm);
+
+       /* If this is the last instance, restore wired count */
+       if (atomic_dec_return(&kvm_mips_instance) == 0) {
+               kvm_info("%s: last KVM instance, restoring TLB parameters\n",
+                        __func__);
+               on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
+       }
+}
+
+long
+kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+       return -EINVAL;
+}
+
+void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+                          struct kvm_memory_slot *dont)
+{
+}
+
+int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+{
+       return 0;
+}
+
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+                                struct kvm_memory_slot *memslot,
+                                struct kvm_userspace_memory_region *mem,
+                                enum kvm_mr_change change)
+{
+       return 0;
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+                                struct kvm_userspace_memory_region *mem,
+                                const struct kvm_memory_slot *old,
+                                enum kvm_mr_change change)
+{
+       unsigned long npages = 0;
+       int i, err = 0;
+
+       kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
+                 __func__, kvm, mem->slot, mem->guest_phys_addr,
+                 mem->memory_size, mem->userspace_addr);
+
+       /* Setup Guest PMAP table */
+       if (!kvm->arch.guest_pmap) {
+               if (mem->slot == 0)
+                       npages = mem->memory_size >> PAGE_SHIFT;
+
+               if (npages) {
+                       kvm->arch.guest_pmap_npages = npages;
+                       kvm->arch.guest_pmap =
+                           kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);
+
+                       if (!kvm->arch.guest_pmap) {
+                               kvm_err("Failed to allocate guest PMAP");
+                               err = -ENOMEM;
+                               goto out;
+                       }
+
+                       kvm_info
+                           ("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
+                            npages, kvm->arch.guest_pmap);
+
+                       /* Now setup the page table */
+                       for (i = 0; i < npages; i++) {
+                               kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
+                       }
+               }
+       }
+out:
+       return;
+}
+
+void kvm_arch_flush_shadow_all(struct kvm *kvm)
+{
+}
+
+void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+                                  struct kvm_memory_slot *slot)
+{
+}
+
+void kvm_arch_flush_shadow(struct kvm *kvm)
+{
+}
+
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+       extern char mips32_exception[], mips32_exceptionEnd[];
+       extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
+       int err, size, offset;
+       void *gebase;
+       int i;
+
+       struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
+
+       if (!vcpu) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       err = kvm_vcpu_init(vcpu, kvm, id);
+
+       if (err)
+               goto out_free_cpu;
+
+       kvm_info("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
+
+       /* Allocate space for host mode exception handlers that handle
+        * guest mode exits
+        */
+       if (cpu_has_veic || cpu_has_vint) {
+               size = 0x200 + VECTORSPACING * 64;
+       } else {
+               size = 0x200;
+       }
+
+       /* Save Linux EBASE */
+       vcpu->arch.host_ebase = (void *)read_c0_ebase();
+
+       gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
+
+       if (!gebase) {
+               err = -ENOMEM;
+               goto out_free_cpu;
+       }
+       kvm_info("Allocated %d bytes for KVM Exception Handlers @ %p\n",
+                ALIGN(size, PAGE_SIZE), gebase);
+
+       /* Save new ebase */
+       vcpu->arch.guest_ebase = gebase;
+
+       /* Copy L1 Guest Exception handler to correct offset */
+
+       /* TLB Refill, EXL = 0 */
+       memcpy(gebase, mips32_exception,
+              mips32_exceptionEnd - mips32_exception);
+
+       /* General Exception Entry point */
+       memcpy(gebase + 0x180, mips32_exception,
+              mips32_exceptionEnd - mips32_exception);
+
+       /* For vectored interrupts poke the exception code @ all offsets 0-7 */
+       for (i = 0; i < 8; i++) {
+               kvm_debug("L1 Vectored handler @ %p\n",
+                         gebase + 0x200 + (i * VECTORSPACING));
+               memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
+                      mips32_exceptionEnd - mips32_exception);
+       }
+
+       /* General handler, relocate to unmapped space for sanity's sake */
+       offset = 0x2000;
+       kvm_info("Installing KVM Exception handlers @ %p, %#x bytes\n",
+                gebase + offset,
+                mips32_GuestExceptionEnd - mips32_GuestException);
+
+       memcpy(gebase + offset, mips32_GuestException,
+              mips32_GuestExceptionEnd - mips32_GuestException);
+
+       /* Invalidate the icache for these ranges */
+       mips32_SyncICache((unsigned long) gebase, ALIGN(size, PAGE_SIZE));
+
+       /* Allocate comm page for guest kernel, a TLB will be reserved for mapping GVA @ 0xFFFF8000 to this page */
+       vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
+
+       if (!vcpu->arch.kseg0_commpage) {
+               err = -ENOMEM;
+               goto out_free_gebase;
+       }
+
+       kvm_info("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
+       kvm_mips_commpage_init(vcpu);
+
+       /* Init */
+       vcpu->arch.last_sched_cpu = -1;
+
+       /* Start off the timer */
+       kvm_mips_emulate_count(vcpu);
+
+       return vcpu;
+
+out_free_gebase:
+       kfree(gebase);
+
+out_free_cpu:
+       kfree(vcpu);
+
+out:
+       return ERR_PTR(err);
+}
+
+void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+{
+       hrtimer_cancel(&vcpu->arch.comparecount_timer);
+
+       kvm_vcpu_uninit(vcpu);
+
+       kvm_mips_dump_stats(vcpu);
+
+       if (vcpu->arch.guest_ebase)
+               kfree(vcpu->arch.guest_ebase);
+
+       if (vcpu->arch.kseg0_commpage)
+               kfree(vcpu->arch.kseg0_commpage);
+
+}
+
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+       kvm_arch_vcpu_free(vcpu);
+}
+
+int
+kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+                                   struct kvm_guest_debug *dbg)
+{
+       return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       int r = 0;
+       sigset_t sigsaved;
+
+       if (vcpu->sigset_active)
+               sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+
+       if (vcpu->mmio_needed) {
+               if (!vcpu->mmio_is_write)
+                       kvm_mips_complete_mmio_load(vcpu, run);
+               vcpu->mmio_needed = 0;
+       }
+
+       /* Check if we have any exceptions/interrupts pending */
+       kvm_mips_deliver_interrupts(vcpu,
+                                   kvm_read_c0_guest_cause(vcpu->arch.cop0));
+
+       local_irq_disable();
+       kvm_guest_enter();
+
+       r = __kvm_mips_vcpu_run(run, vcpu);
+
+       kvm_guest_exit();
+       local_irq_enable();
+
+       if (vcpu->sigset_active)
+               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+
+       return r;
+}
+
+int
+kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
+{
+       int intr = (int)irq->irq;
+       struct kvm_vcpu *dvcpu = NULL;
+
+       if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
+               kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
+                         (int)intr);
+
+       if (irq->cpu == -1)
+               dvcpu = vcpu;
+       else
+               dvcpu = vcpu->kvm->vcpus[irq->cpu];
+
+       if (intr == 2 || intr == 3 || intr == 4) {
+               kvm_mips_callbacks->queue_io_int(dvcpu, irq);
+
+       } else if (intr == -2 || intr == -3 || intr == -4) {
+               kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
+       } else {
+               kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
+                       irq->cpu, irq->irq);
+               return -EINVAL;
+       }
+
+       dvcpu->arch.wait = 0;
+
+       if (waitqueue_active(&dvcpu->wq)) {
+               wake_up_interruptible(&dvcpu->wq);
+       }
+
+       return 0;
+}
+
+int
+kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+                               struct kvm_mp_state *mp_state)
+{
+       return -EINVAL;
+}
+
+int
+kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+                               struct kvm_mp_state *mp_state)
+{
+       return -EINVAL;
+}
+
+long
+kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+       struct kvm_vcpu *vcpu = filp->private_data;
+       void __user *argp = (void __user *)arg;
+       long r;
+       int intr;
+
+       switch (ioctl) {
+       case KVM_NMI:
+               /* Treat the NMI as a CPU reset */
+               r = kvm_mips_reset_vcpu(vcpu);
+               break;
+       case KVM_INTERRUPT:
+               {
+                       struct kvm_mips_interrupt irq;
+                       r = -EFAULT;
+                       if (copy_from_user(&irq, argp, sizeof(irq)))
+                               goto out;
+
+                       intr = (int)irq.irq;
+
+                       kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
+                                 irq.irq);
+
+                       r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
+                       break;
+               }
+       default:
+               r = -EINVAL;
+       }
+
+out:
+       return r;
+}
+
+/*
+ * Get (and clear) the dirty memory log for a memory slot.
+ */
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+{
+       struct kvm_memory_slot *memslot;
+       unsigned long ga, ga_end;
+       int is_dirty = 0;
+       int r;
+       unsigned long n;
+
+       mutex_lock(&kvm->slots_lock);
+
+       r = kvm_get_dirty_log(kvm, log, &is_dirty);
+       if (r)
+               goto out;
+
+       /* If nothing is dirty, don't bother messing with page tables. */
+       if (is_dirty) {
+               memslot = &kvm->memslots->memslots[log->slot];
+
+               ga = memslot->base_gfn << PAGE_SHIFT;
+               ga_end = ga + (memslot->npages << PAGE_SHIFT);
+
+               printk("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
+                      ga_end);
+
+               n = kvm_dirty_bitmap_bytes(memslot);
+               memset(memslot->dirty_bitmap, 0, n);
+       }
+
+       r = 0;
+out:
+       mutex_unlock(&kvm->slots_lock);
+       return r;
+
+}
+
+long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+       long r;
+
+       switch (ioctl) {
+       default:
+               r = -EINVAL;
+       }
+
+       return r;
+}
+
+int kvm_arch_init(void *opaque)
+{
+       int ret;
+
+       if (kvm_mips_callbacks) {
+               kvm_err("kvm: module already exists\n");
+               return -EEXIST;
+       }
+
+       ret = kvm_mips_emulation_init(&kvm_mips_callbacks);
+
+       return ret;
+}
+
+void kvm_arch_exit(void)
+{
+       kvm_mips_callbacks = NULL;
+}
+
+int
+kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+       return -ENOTSUPP;
+}
+
+int
+kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+       return -ENOTSUPP;
+}
+
+int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+{
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+       return -ENOTSUPP;
+}
+
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+       return -ENOTSUPP;
+}
+
+int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
+{
+       return VM_FAULT_SIGBUS;
+}
+
+int kvm_dev_ioctl_check_extension(long ext)
+{
+       int r;
+
+       switch (ext) {
+       case KVM_CAP_COALESCED_MMIO:
+               r = KVM_COALESCED_MMIO_PAGE_OFFSET;
+               break;
+       default:
+               r = 0;
+               break;
+       }
+       return r;
+
+}
+
+int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+       return kvm_mips_pending_timer(vcpu);
+}
+
+int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
+{
+       int i;
+       struct mips_coproc *cop0;
+
+       if (!vcpu)
+               return -1;
+
+       printk("VCPU Register Dump:\n");
+       printk("\tpc = 0x%08lx\n", vcpu->arch.pc);;
+       printk("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);
+
+       for (i = 0; i < 32; i += 4) {
+               printk("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
+                      vcpu->arch.gprs[i],
+                      vcpu->arch.gprs[i + 1],
+                      vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
+       }
+       printk("\thi: 0x%08lx\n", vcpu->arch.hi);
+       printk("\tlo: 0x%08lx\n", vcpu->arch.lo);
+
+       cop0 = vcpu->arch.cop0;
+       printk("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
+              kvm_read_c0_guest_status(cop0), kvm_read_c0_guest_cause(cop0));
+
+       printk("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));
+
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+       int i;
+
+       for (i = 0; i < 32; i++)
+               vcpu->arch.gprs[i] = regs->gprs[i];
+
+       vcpu->arch.hi = regs->hi;
+       vcpu->arch.lo = regs->lo;
+       vcpu->arch.pc = regs->pc;
+
+       return kvm_mips_callbacks->vcpu_ioctl_set_regs(vcpu, regs);
+}
+
+int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+       int i;
+
+       for (i = 0; i < 32; i++)
+               regs->gprs[i] = vcpu->arch.gprs[i];
+
+       regs->hi = vcpu->arch.hi;
+       regs->lo = vcpu->arch.lo;
+       regs->pc = vcpu->arch.pc;
+
+       return kvm_mips_callbacks->vcpu_ioctl_get_regs(vcpu, regs);
+}
+
+void kvm_mips_comparecount_func(unsigned long data)
+{
+       struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
+
+       kvm_mips_callbacks->queue_timer_int(vcpu);
+
+       vcpu->arch.wait = 0;
+       if (waitqueue_active(&vcpu->wq)) {
+               wake_up_interruptible(&vcpu->wq);
+       }
+}
+
+/*
+ * low level hrtimer wake routine.
+ */
+enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
+{
+       struct kvm_vcpu *vcpu;
+
+       vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
+       kvm_mips_comparecount_func((unsigned long) vcpu);
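+       /* Re-arm the hrtimer so the guest sees a periodic ~10ms timer tick */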
+       hrtimer_forward_now(&vcpu->arch.comparecount_timer,
+                           ktime_set(0, MS_TO_NS(10)));
+       return HRTIMER_RESTART;
+}
+
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+{
+       kvm_mips_callbacks->vcpu_init(vcpu);
+       hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
+                    HRTIMER_MODE_REL);
+       vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
+       kvm_mips_init_shadow_tlb(vcpu);
+       return 0;
+}
+
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+       return;
+}
+
+int
+kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr)
+{
+       return 0;
+}
+
+/* Initial guest state */
+int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+       return kvm_mips_callbacks->vcpu_setup(vcpu);
+}
+
+static
+void kvm_mips_set_c0_status(void)
+{
+       uint32_t status = read_c0_status();
+
+       if (cpu_has_fpu)
+               status |= (ST0_CU1);
+
+       if (cpu_has_dsp)
+               status |= (ST0_MX);
+
+       write_c0_status(status);
+       ehb();
+}
+
+/*
+ * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
+ */
+int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       uint32_t cause = vcpu->arch.host_cp0_cause;
+       uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       /* Set a default exit reason */
+       run->exit_reason = KVM_EXIT_UNKNOWN;
+       run->ready_for_interrupt_injection = 1;
+
+       /* Set the appropriate status bits based on host CPU features, before we hit the scheduler */
+       kvm_mips_set_c0_status();
+
+       local_irq_enable();
+
+       kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
+                       cause, opc, run, vcpu);
+
+       /* Do a privilege check: if the guest was in user mode, most of these
+        * exit conditions end up causing an exception to be delivered to the
+        * guest kernel
+        */
+       er = kvm_mips_check_privilege(cause, opc, run, vcpu);
+       if (er == EMULATE_PRIV_FAIL) {
+               goto skip_emul;
+       } else if (er == EMULATE_FAIL) {
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+               goto skip_emul;
+       }
+
+       switch (exccode) {
+       case T_INT:
+               kvm_debug("[%d]T_INT @ %p\n", vcpu->vcpu_id, opc);
+
+               ++vcpu->stat.int_exits;
+               trace_kvm_exit(vcpu, INT_EXITS);
+
+               if (need_resched()) {
+                       cond_resched();
+               }
+
+               ret = RESUME_GUEST;
+               break;
+
+       case T_COP_UNUSABLE:
+               kvm_debug("T_COP_UNUSABLE: @ PC: %p\n", opc);
+
+               ++vcpu->stat.cop_unusable_exits;
+               trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
+               ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
+               /* XXXKYMA: Might need to return to user space */
+               if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) {
+                       ret = RESUME_HOST;
+               }
+               break;
+
+       case T_TLB_MOD:
+               ++vcpu->stat.tlbmod_exits;
+               trace_kvm_exit(vcpu, TLBMOD_EXITS);
+               ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
+               break;
+
+       case T_TLB_ST_MISS:
+               kvm_debug
+                   ("TLB ST fault:  cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
+                    cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
+                    badvaddr);
+
+               ++vcpu->stat.tlbmiss_st_exits;
+               trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
+               ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
+               break;
+
+       case T_TLB_LD_MISS:
+               kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
+                         cause, opc, badvaddr);
+
+               ++vcpu->stat.tlbmiss_ld_exits;
+               trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
+               ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
+               break;
+
+       case T_ADDR_ERR_ST:
+               ++vcpu->stat.addrerr_st_exits;
+               trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
+               ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
+               break;
+
+       case T_ADDR_ERR_LD:
+               ++vcpu->stat.addrerr_ld_exits;
+               trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
+               ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
+               break;
+
+       case T_SYSCALL:
+               ++vcpu->stat.syscall_exits;
+               trace_kvm_exit(vcpu, SYSCALL_EXITS);
+               ret = kvm_mips_callbacks->handle_syscall(vcpu);
+               break;
+
+       case T_RES_INST:
+               ++vcpu->stat.resvd_inst_exits;
+               trace_kvm_exit(vcpu, RESVD_INST_EXITS);
+               ret = kvm_mips_callbacks->handle_res_inst(vcpu);
+               break;
+
+       case T_BREAK:
+               ++vcpu->stat.break_inst_exits;
+               trace_kvm_exit(vcpu, BREAK_INST_EXITS);
+               ret = kvm_mips_callbacks->handle_break(vcpu);
+               break;
+
+       default:
+               kvm_err
+                   ("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x  BadVaddr: %#lx Status: %#lx\n",
+                    exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
+                    kvm_read_c0_guest_status(vcpu->arch.cop0));
+               kvm_arch_vcpu_dump_regs(vcpu);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+               break;
+
+       }
+
+skip_emul:
+       local_irq_disable();
+
+       if (er == EMULATE_DONE && !(ret & RESUME_HOST))
+               kvm_mips_deliver_interrupts(vcpu, cause);
+
+       if (!(ret & RESUME_HOST)) {
+               /* Only check for signals if not already exiting to userspace  */
+               if (signal_pending(current)) {
+                       run->exit_reason = KVM_EXIT_INTR;
+                       ret = (-EINTR << 2) | RESUME_HOST;
+                       ++vcpu->stat.signal_exits;
+                       trace_kvm_exit(vcpu, SIGNAL_EXITS);
+               }
+       }
+
+       return ret;
+}
+
+int __init kvm_mips_init(void)
+{
+       int ret;
+
+       ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+
+       if (ret)
+               return ret;
+
+       /* On MIPS, kernel modules are executed from "mapped space", which requires TLBs.
+        * The TLB handling code is statically linked with the rest of the kernel (kvm_tlb.c)
+        * to avoid the possibility of double faulting. The issue is that the TLB code
+        * references routines that are part of the KVM module, which are only
+        * available once the module is loaded.
+        */
+       kvm_mips_gfn_to_pfn = gfn_to_pfn;
+       kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
+       kvm_mips_is_error_pfn = is_error_pfn;
+
+       pr_info("KVM/MIPS Initialized\n");
+       return 0;
+}
+
+void __exit kvm_mips_exit(void)
+{
+       kvm_exit();
+
+       kvm_mips_gfn_to_pfn = NULL;
+       kvm_mips_release_pfn_clean = NULL;
+       kvm_mips_is_error_pfn = NULL;
+
+       pr_info("KVM/MIPS unloaded\n");
+}
+
+module_init(kvm_mips_init);
+module_exit(kvm_mips_exit);
+
+EXPORT_TRACEPOINT_SYMBOL(kvm_exit);
diff --git a/arch/mips/kvm/kvm_mips_comm.h b/arch/mips/kvm/kvm_mips_comm.h
new file mode 100644 (file)
index 0000000..a4a8c85
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: commpage: mapped into guest kernel space
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#ifndef __KVM_MIPS_COMMPAGE_H__
+#define __KVM_MIPS_COMMPAGE_H__
+
+struct kvm_mips_commpage {
+       struct mips_coproc cop0;        /* COP0 state is mapped into Guest kernel via commpage */
+};
+
+#define KVM_MIPS_COMM_EIDI_OFFSET       0x0
+
+extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu);
+
+#endif /* __KVM_MIPS_COMMPAGE_H__ */
diff --git a/arch/mips/kvm/kvm_mips_commpage.c b/arch/mips/kvm/kvm_mips_commpage.c
new file mode 100644 (file)
index 0000000..3873b1e
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* commpage, currently used for Virtual COP0 registers.
+* Mapped into the guest kernel @ 0x0.
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+
+#include <linux/kvm_host.h>
+
+#include "kvm_mips_comm.h"
+
+void kvm_mips_commpage_init(struct kvm_vcpu *vcpu)
+{
+       struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage;
+       memset(page, 0, sizeof(struct kvm_mips_commpage));
+
+       /* Specific init values for fields */
+       vcpu->arch.cop0 = &page->cop0;
+       memset(vcpu->arch.cop0, 0, sizeof(struct mips_coproc));
+
+       return;
+}
diff --git a/arch/mips/kvm/kvm_mips_dyntrans.c b/arch/mips/kvm/kvm_mips_dyntrans.c
new file mode 100644 (file)
index 0000000..96528e2
--- /dev/null
@@ -0,0 +1,149 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+
+#include "kvm_mips_comm.h"
+
+#define SYNCI_TEMPLATE  0x041f0000
+#define SYNCI_BASE(x)   (((x) >> 21) & 0x1f)
+#define SYNCI_OFFSET(x) ((x) & 0xffff)
+
+#define LW_TEMPLATE     0x8c000000
+#define CLEAR_TEMPLATE  0x00000020
+#define SW_TEMPLATE     0xac000000
+
+int
+kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
+                          struct kvm_vcpu *vcpu)
+{
+       int result = 0;
+       unsigned long kseg0_opc;
+       uint32_t synci_inst = 0x0;
+
+       /* Replace the CACHE instruction, with a NOP */
+       kseg0_opc =
+           CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+                      (vcpu, (unsigned long) opc));
+       memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
+       mips32_SyncICache(kseg0_opc, 32);
+
+       return result;
+}
+
+/*
+ * Address-based CACHE instructions are transformed into SYNCIs. A little
+ * heavy for just D-cache invalidates, but it avoids an expensive trap.
+ */
+int
+kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
+                       struct kvm_vcpu *vcpu)
+{
+       int result = 0;
+       unsigned long kseg0_opc;
+       uint32_t synci_inst = SYNCI_TEMPLATE, base, offset;
+
+       base = (inst >> 21) & 0x1f;
+       offset = inst & 0xffff;
+       synci_inst |= (base << 21);
+       synci_inst |= offset;
+
+       kseg0_opc =
+           CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+                      (vcpu, (unsigned long) opc));
+       memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
+       mips32_SyncICache(kseg0_opc, 32);
+
+       return result;
+}
+
+int
+kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
+{
+       int32_t rt, rd, sel;
+       uint32_t mfc0_inst;
+       unsigned long kseg0_opc, flags;
+
+       rt = (inst >> 16) & 0x1f;
+       rd = (inst >> 11) & 0x1f;
+       sel = inst & 0x7;
+
+       if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
+               mfc0_inst = CLEAR_TEMPLATE;
+               mfc0_inst |= ((rt & 0x1f) << 16);
+       } else {
+               mfc0_inst = LW_TEMPLATE;
+               mfc0_inst |= ((rt & 0x1f) << 16);
+               mfc0_inst |=
+                   offsetof(struct mips_coproc,
+                            reg[rd][sel]) + offsetof(struct kvm_mips_commpage,
+                                                     cop0);
+       }
+
+       if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
+               kseg0_opc =
+                   CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+                              (vcpu, (unsigned long) opc));
+               memcpy((void *)kseg0_opc, (void *)&mfc0_inst, sizeof(uint32_t));
+               mips32_SyncICache(kseg0_opc, 32);
+       } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
+               local_irq_save(flags);
+               memcpy((void *)opc, (void *)&mfc0_inst, sizeof(uint32_t));
+               mips32_SyncICache((unsigned long) opc, 32);
+               local_irq_restore(flags);
+       } else {
+               kvm_err("%s: Invalid address: %p\n", __func__, opc);
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+int
+kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
+{
+       int32_t rt, rd, sel;
+       uint32_t mtc0_inst = SW_TEMPLATE;
+       unsigned long kseg0_opc, flags;
+
+       rt = (inst >> 16) & 0x1f;
+       rd = (inst >> 11) & 0x1f;
+       sel = inst & 0x7;
+
+       mtc0_inst |= ((rt & 0x1f) << 16);
+       mtc0_inst |=
+           offsetof(struct mips_coproc,
+                    reg[rd][sel]) + offsetof(struct kvm_mips_commpage, cop0);
+
+       if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
+               kseg0_opc =
+                   CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+                              (vcpu, (unsigned long) opc));
+               memcpy((void *)kseg0_opc, (void *)&mtc0_inst, sizeof(uint32_t));
+               mips32_SyncICache(kseg0_opc, 32);
+       } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
+               local_irq_save(flags);
+               memcpy((void *)opc, (void *)&mtc0_inst, sizeof(uint32_t));
+               mips32_SyncICache((unsigned long) opc, 32);
+               local_irq_restore(flags);
+       } else {
+               kvm_err("%s: Invalid address: %p\n", __func__, opc);
+               return -EFAULT;
+       }
+
+       return 0;
+}
diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
new file mode 100644 (file)
index 0000000..2b2bac9
--- /dev/null
@@ -0,0 +1,1826 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: Instruction/Exception emulation
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <linux/random.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu-info.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+#include <asm/inst.h>
+
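+/* Temporarily hide CONFIG_MIPS_MT around the r4kcache.h include, apparently
+ * to pick up the plain (non-MT) cache op helpers; the macro is re-defined
+ * immediately afterwards. */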
+#undef CONFIG_MIPS_MT
+#include <asm/r4kcache.h>
+#define CONFIG_MIPS_MT
+
+#include "kvm_mips_opcode.h"
+#include "kvm_mips_int.h"
+#include "kvm_mips_comm.h"
+
+#include "trace.h"
+
+/*
+ * Compute the return address and do emulated branch simulation, if required.
+ * This function should be called only when a branch delay slot is active.
+ */
+unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
+       unsigned long instpc)
+{
+       unsigned int dspcontrol;
+       union mips_instruction insn;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       long epc = instpc;
+       long nextpc = KVM_INVALID_INST;
+
+       if (epc & 3)
+               goto unaligned;
+
+       /*
+        * Read the instruction
+        */
+       insn.word = kvm_get_inst((uint32_t *) epc, vcpu);
+
+       if (insn.word == KVM_INVALID_INST)
+               return KVM_INVALID_INST;
+
+       switch (insn.i_format.opcode) {
+               /*
+                * jr and jalr are in r_format format.
+                */
+       case spec_op:
+               switch (insn.r_format.func) {
+               case jalr_op:
+                       arch->gprs[insn.r_format.rd] = epc + 8;
+                       /* Fall through */
+               case jr_op:
+                       nextpc = arch->gprs[insn.r_format.rs];
+                       break;
+               }
+               break;
+
+               /*
+                * This group contains:
+                * bltz_op, bgez_op, bltzl_op, bgezl_op,
+                * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
+                */
+       case bcond_op:
+               switch (insn.i_format.rt) {
+               case bltz_op:
+               case bltzl_op:
+                       if ((long)arch->gprs[insn.i_format.rs] < 0)
+                               epc = epc + 4 + (insn.i_format.simmediate << 2);
+                       else
+                               epc += 8;
+                       nextpc = epc;
+                       break;
+
+               case bgez_op:
+               case bgezl_op:
+                       if ((long)arch->gprs[insn.i_format.rs] >= 0)
+                               epc = epc + 4 + (insn.i_format.simmediate << 2);
+                       else
+                               epc += 8;
+                       nextpc = epc;
+                       break;
+
+               case bltzal_op:
+               case bltzall_op:
+                       arch->gprs[31] = epc + 8;
+                       if ((long)arch->gprs[insn.i_format.rs] < 0)
+                               epc = epc + 4 + (insn.i_format.simmediate << 2);
+                       else
+                               epc += 8;
+                       nextpc = epc;
+                       break;
+
+               case bgezal_op:
+               case bgezall_op:
+                       arch->gprs[31] = epc + 8;
+                       if ((long)arch->gprs[insn.i_format.rs] >= 0)
+                               epc = epc + 4 + (insn.i_format.simmediate << 2);
+                       else
+                               epc += 8;
+                       nextpc = epc;
+                       break;
+               case bposge32_op:
+                       if (!cpu_has_dsp)
+                               goto sigill;
+
+                       dspcontrol = rddsp(0x01);
+
+                       if (dspcontrol >= 32) {
+                               epc = epc + 4 + (insn.i_format.simmediate << 2);
+                       } else
+                               epc += 8;
+                       nextpc = epc;
+                       break;
+               }
+               break;
+
+               /*
+                * These are unconditional and in j_format.
+                */
+       case jal_op:
+               arch->gprs[31] = instpc + 8;
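+               /* Fall through */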
+       case j_op:
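+               /* The top 4 bits of the target come from the address of the
+                * delay-slot instruction; the low 28 bits are the 26-bit
+                * target field shifted left by 2 */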
+               epc += 4;
+               epc >>= 28;
+               epc <<= 28;
+               epc |= (insn.j_format.target << 2);
+               nextpc = epc;
+               break;
+
+               /*
+                * These are conditional and in i_format.
+                */
+       case beq_op:
+       case beql_op:
+               if (arch->gprs[insn.i_format.rs] ==
+                   arch->gprs[insn.i_format.rt])
+                       epc = epc + 4 + (insn.i_format.simmediate << 2);
+               else
+                       epc += 8;
+               nextpc = epc;
+               break;
+
+       case bne_op:
+       case bnel_op:
+               if (arch->gprs[insn.i_format.rs] !=
+                   arch->gprs[insn.i_format.rt])
+                       epc = epc + 4 + (insn.i_format.simmediate << 2);
+               else
+                       epc += 8;
+               nextpc = epc;
+               break;
+
+       case blez_op:           /* not really i_format */
+       case blezl_op:
+               /* rt field assumed to be zero */
+               if ((long)arch->gprs[insn.i_format.rs] <= 0)
+                       epc = epc + 4 + (insn.i_format.simmediate << 2);
+               else
+                       epc += 8;
+               nextpc = epc;
+               break;
+
+       case bgtz_op:
+       case bgtzl_op:
+               /* rt field assumed to be zero */
+               if ((long)arch->gprs[insn.i_format.rs] > 0)
+                       epc = epc + 4 + (insn.i_format.simmediate << 2);
+               else
+                       epc += 8;
+               nextpc = epc;
+               break;
+
+               /*
+                * And now the FPA/cp1 branch instructions.
+                */
+       case cop1_op:
+               printk("%s: unsupported cop1_op\n", __func__);
+               break;
+       }
+
+       return nextpc;
+
+unaligned:
+       printk("%s: unaligned epc\n", __func__);
+       return nextpc;
+
+sigill:
+       printk("%s: DSP branch but not DSP ASE\n", __func__);
+       return nextpc;
+}
+
+enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
+{
+       unsigned long branch_pc;
+       enum emulation_result er = EMULATE_DONE;
+
+       if (cause & CAUSEF_BD) {
+               branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
+               if (branch_pc == KVM_INVALID_INST) {
+                       er = EMULATE_FAIL;
+               } else {
+                       vcpu->arch.pc = branch_pc;
+                       kvm_debug("BD update_pc(): New PC: %#lx\n", vcpu->arch.pc);
+               }
+       } else
+               vcpu->arch.pc += 4;
+
+       kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
+
+       return er;
+}
+
+/* Every time the compare register is written to, we need to decide when to
+ * fire the timer that represents timer ticks to the GUEST.
+ */
+enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       enum emulation_result er = EMULATE_DONE;
+
+       /* If COUNT is enabled */
+       if (!(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC)) {
+               hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
+               hrtimer_start(&vcpu->arch.comparecount_timer,
+                             ktime_set(0, MS_TO_NS(10)), HRTIMER_MODE_REL);
+       } else {
+               hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
+       }
+
+       return er;
+}
+
+enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       enum emulation_result er = EMULATE_DONE;
+
+       if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
+               kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
+                         kvm_read_c0_guest_epc(cop0));
+               kvm_clear_c0_guest_status(cop0, ST0_EXL);
+               vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
+
+       } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
+               kvm_clear_c0_guest_status(cop0, ST0_ERL);
+               vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
+       } else {
+               printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
+                      vcpu->arch.pc);
+               er = EMULATE_FAIL;
+       }
+
+       return er;
+}
+
+enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DONE;
+
+       kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
+                 vcpu->arch.pending_exceptions);
+
+       ++vcpu->stat.wait_exits;
+       trace_kvm_exit(vcpu, WAIT_EXITS);
+       if (!vcpu->arch.pending_exceptions) {
+               vcpu->arch.wait = 1;
+               kvm_vcpu_block(vcpu);
+
+               /* If we are runnable, then definitely go off to user space to
+                * check if any I/O interrupts are pending.
+                */
+               if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
+                       clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
+                       vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
+               }
+       }
+
+       return er;
+}
+
+/* XXXKYMA: Linux doesn't seem to use TLBR; return EMULATE_FAIL for now so
+ * that we can catch this if things ever change
+ */
+enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       enum emulation_result er = EMULATE_FAIL;
+       uint32_t pc = vcpu->arch.pc;
+
+       printk("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
+       return er;
+}
+
+/* Write Guest TLB Entry @ Index */
+enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       int index = kvm_read_c0_guest_index(cop0);
+       enum emulation_result er = EMULATE_DONE;
+       struct kvm_mips_tlb *tlb = NULL;
+       uint32_t pc = vcpu->arch.pc;
+
+       if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
+               printk("%s: illegal index: %d\n", __func__, index);
+               printk
+                   ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
+                    pc, index, kvm_read_c0_guest_entryhi(cop0),
+                    kvm_read_c0_guest_entrylo0(cop0),
+                    kvm_read_c0_guest_entrylo1(cop0),
+                    kvm_read_c0_guest_pagemask(cop0));
+               index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
+       }
+
+       tlb = &vcpu->arch.guest_tlb[index];
+#if 1
+       /* Probe the shadow host TLB for the entry being overwritten; if one matches, invalidate it */
+       kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+#endif
+
+       tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
+       tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
+       tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
+       tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
+
+       kvm_debug
+           ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
+            pc, index, kvm_read_c0_guest_entryhi(cop0),
+            kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0),
+            kvm_read_c0_guest_pagemask(cop0));
+
+       return er;
+}
+
+/* Write Guest TLB Entry @ Random Index */
+enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       enum emulation_result er = EMULATE_DONE;
+       struct kvm_mips_tlb *tlb = NULL;
+       uint32_t pc = vcpu->arch.pc;
+       int index;
+
+#if 1
+       get_random_bytes(&index, sizeof(index));
+       index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
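+       /* Masking assumes KVM_MIPS_GUEST_TLB_SIZE is a power of two */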
+#else
+       index = jiffies % KVM_MIPS_GUEST_TLB_SIZE;
+#endif
+
+       if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
+               printk("%s: illegal index: %d\n", __func__, index);
+               return EMULATE_FAIL;
+       }
+
+       tlb = &vcpu->arch.guest_tlb[index];
+
+#if 1
+       /* Probe the shadow host TLB for the entry being overwritten; if one matches, invalidate it */
+       kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+#endif
+
+       tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
+       tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
+       tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
+       tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
+
+       kvm_debug
+           ("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
+            pc, index, kvm_read_c0_guest_entryhi(cop0),
+            kvm_read_c0_guest_entrylo0(cop0),
+            kvm_read_c0_guest_entrylo1(cop0));
+
+       return er;
+}
+
+enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       long entryhi = kvm_read_c0_guest_entryhi(cop0);
+       enum emulation_result er = EMULATE_DONE;
+       uint32_t pc = vcpu->arch.pc;
+       int index = -1;
+
+       index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
+
+       kvm_write_c0_guest_index(cop0, index);
+
+       kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
+                 index);
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
+                    struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       enum emulation_result er = EMULATE_DONE;
+       int32_t rt, rd, copz, sel, co_bit, op;
+       uint32_t pc = vcpu->arch.pc;
+       unsigned long curr_pc;
+
+       /*
+        * Update PC and hold onto current PC in case there is
+        * an error and we want to rollback the PC
+        */
+       curr_pc = vcpu->arch.pc;
+       er = update_pc(vcpu, cause);
+       if (er == EMULATE_FAIL) {
+               return er;
+       }
+
+       copz = (inst >> 21) & 0x1f;
+       rt = (inst >> 16) & 0x1f;
+       rd = (inst >> 11) & 0x1f;
+       sel = inst & 0x7;
+       co_bit = (inst >> 25) & 1;
+
+       /* Verify that the register is valid */
+       if (rd > MIPS_CP0_DESAVE) {
+               printk("Invalid rd: %d\n", rd);
+               er = EMULATE_FAIL;
+               goto done;
+       }
+
+       if (co_bit) {
+               op = (inst) & 0xff;
+
+               switch (op) {
+               case tlbr_op:   /*  Read indexed TLB entry  */
+                       er = kvm_mips_emul_tlbr(vcpu);
+                       break;
+               case tlbwi_op:  /*  Write indexed  */
+                       er = kvm_mips_emul_tlbwi(vcpu);
+                       break;
+               case tlbwr_op:  /*  Write random  */
+                       er = kvm_mips_emul_tlbwr(vcpu);
+                       break;
+               case tlbp_op:   /* TLB Probe */
+                       er = kvm_mips_emul_tlbp(vcpu);
+                       break;
+               case rfe_op:
+                       printk("!!!COP0_RFE!!!\n");
+                       break;
+               case eret_op:
+                       er = kvm_mips_emul_eret(vcpu);
+                       goto dont_update_pc;
+                       break;
+               case wait_op:
+                       er = kvm_mips_emul_wait(vcpu);
+                       break;
+               }
+       } else {
+               switch (copz) {
+               case mfc_op:
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+                       cop0->stat[rd][sel]++;
+#endif
+                       /* Get reg */
+                       if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
+                               /* XXXKYMA: Run the Guest count register @ 1/4 the rate of the host */
+                               vcpu->arch.gprs[rt] = (read_c0_count() >> 2);
+                       } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
+                               vcpu->arch.gprs[rt] = 0x0;
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+                               kvm_mips_trans_mfc0(inst, opc, vcpu);
+#endif
+                       } else {
+                               vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+                               kvm_mips_trans_mfc0(inst, opc, vcpu);
+#endif
+                       }
+
+                       kvm_debug
+                           ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
+                            pc, rd, sel, rt, vcpu->arch.gprs[rt]);
+
+                       break;
+
+               case dmfc_op:
+                       vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
+                       break;
+
+               case mtc_op:
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+                       cop0->stat[rd][sel]++;
+#endif
+                       if ((rd == MIPS_CP0_TLB_INDEX)
+                           && (vcpu->arch.gprs[rt] >=
+                               KVM_MIPS_GUEST_TLB_SIZE)) {
+                               printk("Invalid TLB Index: %ld",
+                                      vcpu->arch.gprs[rt]);
+                               er = EMULATE_FAIL;
+                               break;
+                       }
+#define C0_EBASE_CORE_MASK 0xff
+                       if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
+                               /* Preserve CORE number */
+                               kvm_change_c0_guest_ebase(cop0,
+                                                         ~(C0_EBASE_CORE_MASK),
+                                                         vcpu->arch.gprs[rt]);
+                               printk("MTCz, cop0->reg[EBASE]: %#lx\n",
+                                      kvm_read_c0_guest_ebase(cop0));
+                       } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
+                               uint32_t nasid = ASID_MASK(vcpu->arch.gprs[rt]);
+                               if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0)
+                                   &&
+                                   (ASID_MASK(kvm_read_c0_guest_entryhi(cop0))
+                                     != nasid)) {
+
+                                       kvm_debug
+                                           ("MTCz, change ASID from %#lx to %#lx\n",
+                                            ASID_MASK(kvm_read_c0_guest_entryhi(cop0)),
+                                            ASID_MASK(vcpu->arch.gprs[rt]));
+
+                                       /* Blow away the shadow host TLBs */
+                                       kvm_mips_flush_host_tlb(1);
+                               }
+                               kvm_write_c0_guest_entryhi(cop0,
+                                                          vcpu->arch.gprs[rt]);
+                       }
+                       /* Are we writing to COUNT */
+                       else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
+                               /* Linux doesn't seem to write into COUNT;
+                                * we throw an error if we notice a write to
+                                * COUNT
+                                */
+                               /*er = EMULATE_FAIL; */
+                               goto done;
+                       } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
+                               kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
+                                         pc, kvm_read_c0_guest_compare(cop0),
+                                         vcpu->arch.gprs[rt]);
+
+                               /* If we are writing to COMPARE */
+                               /* Clear pending timer interrupt, if any */
+                               kvm_mips_callbacks->dequeue_timer_int(vcpu);
+                               kvm_write_c0_guest_compare(cop0,
+                                                          vcpu->arch.gprs[rt]);
+                       } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
+                               kvm_write_c0_guest_status(cop0,
+                                                         vcpu->arch.gprs[rt]);
+                               /* Make sure that CU1 and NMI bits are never set */
+                               kvm_clear_c0_guest_status(cop0,
+                                                         (ST0_CU1 | ST0_NMI));
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+                               kvm_mips_trans_mtc0(inst, opc, vcpu);
+#endif
+                       } else {
+                               cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+                               kvm_mips_trans_mtc0(inst, opc, vcpu);
+#endif
+                       }
+
+                       kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
+                                 rd, sel, cop0->reg[rd][sel]);
+                       break;
+
+               case dmtc_op:
+                       printk
+                           ("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
+                            vcpu->arch.pc, rt, rd, sel);
+                       er = EMULATE_FAIL;
+                       break;
+
+               case mfmcz_op:
+#ifdef KVM_MIPS_DEBUG_COP0_COUNTERS
+                       cop0->stat[MIPS_CP0_STATUS][0]++;
+#endif
+                       if (rt != 0) {
+                               vcpu->arch.gprs[rt] =
+                                   kvm_read_c0_guest_status(cop0);
+                       }
+                       /* EI */
+                       if (inst & 0x20) {
+                               kvm_debug("[%#lx] mfmcz_op: EI\n",
+                                         vcpu->arch.pc);
+                               kvm_set_c0_guest_status(cop0, ST0_IE);
+                       } else {
+                               kvm_debug("[%#lx] mfmcz_op: DI\n",
+                                         vcpu->arch.pc);
+                               kvm_clear_c0_guest_status(cop0, ST0_IE);
+                       }
+
+                       break;
+
+               case wrpgpr_op:
+                       {
+                               uint32_t css =
+                                   cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
+                               uint32_t pss =
+                                   (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
+                               /* We don't support any shadow register sets, so SRSCtl[PSS] == SRSCtl[CSS] = 0 */
+                               if (css || pss) {
+                                       er = EMULATE_FAIL;
+                                       break;
+                               }
+                               kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
+                                         vcpu->arch.gprs[rt]);
+                               vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
+                       }
+                       break;
+               default:
+                       printk
+                           ("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
+                            vcpu->arch.pc, copz);
+                       er = EMULATE_FAIL;
+                       break;
+               }
+       }
+
+done:
+       /*
+        * Rollback PC only if emulation was unsuccessful
+        */
+       if (er == EMULATE_FAIL) {
+               vcpu->arch.pc = curr_pc;
+       }
+
+dont_update_pc:
+       /*
+        * This is for special instructions whose emulation
+        * updates the PC, so do not overwrite the PC under
+        * any circumstances
+        */
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
+                      struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DO_MMIO;
+       int32_t op, base, rt, offset;
+       uint32_t bytes;
+       void *data = run->mmio.data;
+       unsigned long curr_pc;
+
+       /*
+        * Update PC and hold onto current PC in case there is
+        * an error and we want to rollback the PC
+        */
+       curr_pc = vcpu->arch.pc;
+       er = update_pc(vcpu, cause);
+       if (er == EMULATE_FAIL)
+               return er;
+
+       rt = (inst >> 16) & 0x1f;
+       base = (inst >> 21) & 0x1f;
+       offset = inst & 0xffff;
+       op = (inst >> 26) & 0x3f;
+
+       switch (op) {
+       case sb_op:
+               bytes = 1;
+               if (bytes > sizeof(run->mmio.data)) {
+                       kvm_err("%s: bad MMIO length: %d\n", __func__,
+                              run->mmio.len);
+               }
+               run->mmio.phys_addr =
+                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+                                                  host_cp0_badvaddr);
+               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+                       er = EMULATE_FAIL;
+                       break;
+               }
+               run->mmio.len = bytes;
+               run->mmio.is_write = 1;
+               vcpu->mmio_needed = 1;
+               vcpu->mmio_is_write = 1;
+               *(u8 *) data = vcpu->arch.gprs[rt];
+               kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+                         vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
+                         *(uint8_t *) data);
+
+               break;
+
+       case sw_op:
+               bytes = 4;
+               if (bytes > sizeof(run->mmio.data)) {
+                       kvm_err("%s: bad MMIO length: %d\n", __func__,
+                              run->mmio.len);
+               }
+               run->mmio.phys_addr =
+                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+                                                  host_cp0_badvaddr);
+               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+                       er = EMULATE_FAIL;
+                       break;
+               }
+
+               run->mmio.len = bytes;
+               run->mmio.is_write = 1;
+               vcpu->mmio_needed = 1;
+               vcpu->mmio_is_write = 1;
+               *(uint32_t *) data = vcpu->arch.gprs[rt];
+
+               kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+                         vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+                         vcpu->arch.gprs[rt], *(uint32_t *) data);
+               break;
+
+       case sh_op:
+               bytes = 2;
+               if (bytes > sizeof(run->mmio.data)) {
+                       kvm_err("%s: bad MMIO length: %d\n", __func__,
+                              run->mmio.len);
+               }
+               run->mmio.phys_addr =
+                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+                                                  host_cp0_badvaddr);
+               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+                       er = EMULATE_FAIL;
+                       break;
+               }
+
+               run->mmio.len = bytes;
+               run->mmio.is_write = 1;
+               vcpu->mmio_needed = 1;
+               vcpu->mmio_is_write = 1;
+               *(uint16_t *) data = vcpu->arch.gprs[rt];
+
+               kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+                         vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+                         vcpu->arch.gprs[rt], *(uint32_t *) data);
+               break;
+
+       default:
+               printk("Store not yet supported");
+               er = EMULATE_FAIL;
+               break;
+       }
+
+       /*
+        * Rollback PC if emulation was unsuccessful
+        */
+       if (er == EMULATE_FAIL) {
+               vcpu->arch.pc = curr_pc;
+       }
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
+                     struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DO_MMIO;
+       int32_t op, base, rt, offset;
+       uint32_t bytes;
+
+       rt = (inst >> 16) & 0x1f;
+       base = (inst >> 21) & 0x1f;
+       offset = inst & 0xffff;
+       op = (inst >> 26) & 0x3f;
+
+       vcpu->arch.pending_load_cause = cause;
+       vcpu->arch.io_gpr = rt;
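+       /* Remember the destination GPR so kvm_mips_complete_mmio_load() can
+        * write the value back once userspace completes the MMIO read */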
+
+       switch (op) {
+       case lw_op:
+               bytes = 4;
+               if (bytes > sizeof(run->mmio.data)) {
+                       kvm_err("%s: bad MMIO length: %d\n", __func__,
+                              run->mmio.len);
+                       er = EMULATE_FAIL;
+                       break;
+               }
+               run->mmio.phys_addr =
+                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+                                                  host_cp0_badvaddr);
+               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+                       er = EMULATE_FAIL;
+                       break;
+               }
+
+               run->mmio.len = bytes;
+               run->mmio.is_write = 0;
+               vcpu->mmio_needed = 1;
+               vcpu->mmio_is_write = 0;
+               break;
+
+       case lh_op:
+       case lhu_op:
+               bytes = 2;
+               if (bytes > sizeof(run->mmio.data)) {
+                       kvm_err("%s: bad MMIO length: %d\n", __func__,
+                              run->mmio.len);
+                       er = EMULATE_FAIL;
+                       break;
+               }
+               run->mmio.phys_addr =
+                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+                                                  host_cp0_badvaddr);
+               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+                       er = EMULATE_FAIL;
+                       break;
+               }
+
+               run->mmio.len = bytes;
+               run->mmio.is_write = 0;
+               vcpu->mmio_needed = 1;
+               vcpu->mmio_is_write = 0;
+
+               if (op == lh_op)
+                       vcpu->mmio_needed = 2;
+               else
+                       vcpu->mmio_needed = 1;
+
+               break;
+
+       case lbu_op:
+       case lb_op:
+               bytes = 1;
+               if (bytes > sizeof(run->mmio.data)) {
+                       kvm_err("%s: bad MMIO length: %d\n", __func__,
+                              run->mmio.len);
+                       er = EMULATE_FAIL;
+                       break;
+               }
+               run->mmio.phys_addr =
+                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+                                                  host_cp0_badvaddr);
+               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+                       er = EMULATE_FAIL;
+                       break;
+               }
+
+               run->mmio.len = bytes;
+               run->mmio.is_write = 0;
+               vcpu->mmio_is_write = 0;
+
+               if (op == lb_op)
+                       vcpu->mmio_needed = 2;
+               else
+                       vcpu->mmio_needed = 1;
+
+               break;
+
+       default:
+               printk("Load not yet supported");
+               er = EMULATE_FAIL;
+               break;
+       }
+
+       return er;
+}
+
+int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
+{
+       unsigned long offset = (va & ~PAGE_MASK);
+       struct kvm *kvm = vcpu->kvm;
+       unsigned long pa;
+       gfn_t gfn;
+       pfn_t pfn;
+
+       gfn = va >> PAGE_SHIFT;
+
+       if (gfn >= kvm->arch.guest_pmap_npages) {
+               printk("%s: Invalid gfn: %#llx\n", __func__, gfn);
+               kvm_mips_dump_host_tlbs();
+               kvm_arch_vcpu_dump_regs(vcpu);
+               return -1;
+       }
+       pfn = kvm->arch.guest_pmap[gfn];
+       pa = (pfn << PAGE_SHIFT) | offset;
+
+       printk("%s: va: %#lx, unmapped: %#x\n", __func__, va, CKSEG0ADDR(pa));
+
+       mips32_SyncICache(CKSEG0ADDR(pa), 32);
+       return 0;
+}
+
+#define MIPS_CACHE_OP_INDEX_INV         0x0
+#define MIPS_CACHE_OP_INDEX_LD_TAG      0x1
+#define MIPS_CACHE_OP_INDEX_ST_TAG      0x2
+#define MIPS_CACHE_OP_IMP               0x3
+#define MIPS_CACHE_OP_HIT_INV           0x4
+#define MIPS_CACHE_OP_FILL_WB_INV       0x5
+#define MIPS_CACHE_OP_HIT_HB            0x6
+#define MIPS_CACHE_OP_FETCH_LOCK        0x7
+
+#define MIPS_CACHE_ICACHE               0x0
+#define MIPS_CACHE_DCACHE               0x1
+#define MIPS_CACHE_SEC                  0x3
+
+enum emulation_result
+kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
+                      struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       extern void (*r4k_blast_dcache) (void);
+       extern void (*r4k_blast_icache) (void);
+       enum emulation_result er = EMULATE_DONE;
+       int32_t offset, cache, op_inst, op, base;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       unsigned long va;
+       unsigned long curr_pc;
+
+       /*
+        * Update PC and hold onto current PC in case there is
+        * an error and we want to rollback the PC
+        */
+       curr_pc = vcpu->arch.pc;
+       er = update_pc(vcpu, cause);
+       if (er == EMULATE_FAIL)
+               return er;
+
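+       /* Decode the CACHE instruction: the 5-bit op_inst field splits into a
+        * cache selector (bits 16:17) and an operation (bits 18:20) */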
+       base = (inst >> 21) & 0x1f;
+       op_inst = (inst >> 16) & 0x1f;
+       offset = inst & 0xffff;
+       cache = (inst >> 16) & 0x3;
+       op = (inst >> 18) & 0x7;
+
+       va = arch->gprs[base] + offset;
+
+       kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+                 cache, op, base, arch->gprs[base], offset);
+
+       /* Treat INDEX_INV as a no-op; it is basically issued by Linux on
+        * startup to invalidate the caches entirely by stepping through all
+        * the ways/indexes
+        */
+       if (op == MIPS_CACHE_OP_INDEX_INV) {
+               kvm_debug
+                   ("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+                    vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
+                    arch->gprs[base], offset);
+
+               if (cache == MIPS_CACHE_DCACHE)
+                       r4k_blast_dcache();
+               else if (cache == MIPS_CACHE_ICACHE)
+                       r4k_blast_icache();
+               else {
+                       printk("%s: unsupported CACHE INDEX operation\n",
+                              __func__);
+                       return EMULATE_FAIL;
+               }
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+               kvm_mips_trans_cache_index(inst, opc, vcpu);
+#endif
+               goto done;
+       }
+
+       preempt_disable();
+       if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
+
+               if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) {
+                       kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
+               }
+       } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
+                  KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
+               int index;
+
+               /* If an entry already exists then skip */
+               if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) {
+                       goto skip_fault;
+               }
+
+               /* If the address is not in the guest TLB, then give the guest
+                * a fault; the resulting handler will do the right thing
+                */
+               index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
+                                                 ASID_MASK(kvm_read_c0_guest_entryhi(cop0)));
+
+               if (index < 0) {
+                       vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
+                       vcpu->arch.host_cp0_badvaddr = va;
+                       er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
+                                                        vcpu);
+                       preempt_enable();
+                       goto dont_update_pc;
+               } else {
+                       struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
+                       /* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */
+                       if (!TLB_IS_VALID(*tlb, va)) {
+                               er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
+                                                               run, vcpu);
+                               preempt_enable();
+                               goto dont_update_pc;
+                       } else {
+                               /* We fault an entry from the guest tlb to the shadow host TLB */
+                               kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
+                                                                    NULL,
+                                                                    NULL);
+                       }
+               }
+       } else {
+               printk
+                   ("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
+                    cache, op, base, arch->gprs[base], offset);
+               er = EMULATE_FAIL;
+               preempt_enable();
+               goto dont_update_pc;
+
+       }
+
+skip_fault:
+       /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
+       if (cache == MIPS_CACHE_DCACHE
+           && (op == MIPS_CACHE_OP_FILL_WB_INV
+               || op == MIPS_CACHE_OP_HIT_INV)) {
+               flush_dcache_line(va);
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+               /* Replace the CACHE instruction with a SYNCI; not the same, but it avoids a trap */
+               kvm_mips_trans_cache_va(inst, opc, vcpu);
+#endif
+       } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
+               flush_dcache_line(va);
+               flush_icache_line(va);
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+               /* Replace the CACHE instruction, with a SYNCI */
+               kvm_mips_trans_cache_va(inst, opc, vcpu);
+#endif
+       } else {
+               printk
+                   ("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
+                    cache, op, base, arch->gprs[base], offset);
+               er = EMULATE_FAIL;
+               preempt_enable();
+               goto dont_update_pc;
+       }
+
+       preempt_enable();
+
+      dont_update_pc:
+       /*
+        * Rollback PC
+        */
+       vcpu->arch.pc = curr_pc;
+      done:
+       return er;
+}
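
The decoding at the top of kvm_mips_emulate_cache() pulls the base register, cache selector, cache operation and offset straight out of the CACHE instruction word. The standalone sketch below repeats the same shifts and masks; the sample encoding is hypothetical and chosen only to exercise the decoder.

/* cache_decode.c -- sketch of the CACHE field extraction used above. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical encoding: CACHE op 0x5 (Hit_Writeback_Inv) on the
         * D-cache, base register $1, offset 0. */
        uint32_t inst = 0xbc350000;

        int base   = (inst >> 21) & 0x1f;       /* base register number */
        int cache  = (inst >> 16) & 0x3;        /* 0 = I, 1 = D, 3 = S  */
        int op     = (inst >> 18) & 0x7;        /* cache operation      */
        int offset = inst & 0xffff;             /* 16-bit displacement  */

        printf("base=$%d cache=%#x op=%#x offset=%#x\n",
               base, cache, op, offset);
        return 0;
}

Run standalone, this prints base=$1 cache=0x1 op=0x5 offset=0, i.e. the MIPS_CACHE_DCACHE / MIPS_CACHE_OP_FILL_WB_INV case handled in the function above.
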
+
+enum emulation_result
+kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
+                     struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DONE;
+       uint32_t inst;
+
+       /*
+        *  Fetch the instruction.
+        */
+       if (cause & CAUSEF_BD) {
+               opc += 1;
+       }
+
+       inst = kvm_get_inst(opc, vcpu);
+
+       switch (((union mips_instruction)inst).r_format.opcode) {
+       case cop0_op:
+               er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
+               break;
+       case sb_op:
+       case sh_op:
+       case sw_op:
+               er = kvm_mips_emulate_store(inst, cause, run, vcpu);
+               break;
+       case lb_op:
+       case lbu_op:
+       case lhu_op:
+       case lh_op:
+       case lw_op:
+               er = kvm_mips_emulate_load(inst, cause, run, vcpu);
+               break;
+
+       case cache_op:
+               ++vcpu->stat.cache_exits;
+               trace_kvm_exit(vcpu, CACHE_EXITS);
+               er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
+               break;
+
+       default:
+               printk("Instruction emulation not supported (%p/%#x)\n", opc,
+                      inst);
+               kvm_arch_vcpu_dump_regs(vcpu);
+               er = EMULATE_FAIL;
+               break;
+       }
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc,
+                        struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
+
+               kvm_change_c0_guest_cause(cop0, (0xff),
+                                         (T_SYSCALL << CAUSEB_EXCCODE));
+
+               /* Set PC to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+       } else {
+               printk("Trying to deliver SYSCALL when EXL is already set\n");
+               er = EMULATE_FAIL;
+       }
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc,
+                           struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+                               ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
+                         arch->pc);
+
+               /* set pc to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x0;
+
+       } else {
+               kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
+                         arch->pc);
+
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       }
+
+       kvm_change_c0_guest_cause(cop0, (0xff),
+                                 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
+
+       /* setup badvaddr, context and entryhi registers for the guest */
+       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+       /* XXXKYMA: is the context register used by linux??? */
+       kvm_write_c0_guest_entryhi(cop0, entryhi);
+       /* Blow away the shadow host TLBs */
+       kvm_mips_flush_host_tlb(1);
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc,
+                          struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+       unsigned long entryhi =
+               (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+               ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
+                         arch->pc);
+
+               /* set pc to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+       } else {
+               kvm_debug("[EXL == 1] delivering TLB INV @ pc %#lx\n",
+                         arch->pc);
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       }
+
+       kvm_change_c0_guest_cause(cop0, (0xff),
+                                 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
+
+       /* setup badvaddr, context and entryhi registers for the guest */
+       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+       /* XXXKYMA: is the context register used by linux??? */
+       kvm_write_c0_guest_entryhi(cop0, entryhi);
+       /* Blow away the shadow host TLBs */
+       kvm_mips_flush_host_tlb(1);
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc,
+                           struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+                               ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
+                         arch->pc);
+
+               /* Set PC to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x0;
+       } else {
+               kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
+                         arch->pc);
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       }
+
+       kvm_change_c0_guest_cause(cop0, (0xff),
+                                 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
+
+       /* setup badvaddr, context and entryhi registers for the guest */
+       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+       /* XXXKYMA: is the context register used by linux??? */
+       kvm_write_c0_guest_entryhi(cop0, entryhi);
+       /* Blow away the shadow host TLBs */
+       kvm_mips_flush_host_tlb(1);
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc,
+                          struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+               ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("[EXL == 0] Delivering TLB INV @ pc %#lx\n",
+                         arch->pc);
+
+               /* Set PC to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       } else {
+               kvm_debug("[EXL == 1] Delivering TLB INV @ pc %#lx\n",
+                         arch->pc);
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       }
+
+       kvm_change_c0_guest_cause(cop0, (0xff),
+                                 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
+
+       /* setup badvaddr, context and entryhi registers for the guest */
+       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+       /* XXXKYMA: is the context register used by linux??? */
+       kvm_write_c0_guest_entryhi(cop0, entryhi);
+       /* Blow away the shadow host TLBs */
+       kvm_mips_flush_host_tlb(1);
+
+       return er;
+}
+
+/* TLBMOD: store into address matching TLB with Dirty bit off */
+enum emulation_result
+kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
+                      struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DONE;
+
+#ifdef DEBUG
+       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+                               ASID_MASK(kvm_read_c0_guest_entryhi(vcpu->arch.cop0));
+       int index;
+
+       /*
+        * If the address is not in the guest TLB, then we are in trouble
+        */
+       index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
+       if (index < 0) {
+               /* XXXKYMA Invalidate and retry */
+               kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
+               kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
+                    __func__, entryhi);
+               kvm_mips_dump_guest_tlbs(vcpu);
+               kvm_mips_dump_host_tlbs();
+               return EMULATE_FAIL;
+       }
+#endif
+
+       er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc,
+                       struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+                               ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
+                         arch->pc);
+
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       } else {
+               kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
+                         arch->pc);
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       }
+
+       kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE));
+
+       /* setup badvaddr, context and entryhi registers for the guest */
+       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+       /* XXXKYMA: is the context register used by linux??? */
+       kvm_write_c0_guest_entryhi(cop0, entryhi);
+       /* Blow away the shadow host TLBs */
+       kvm_mips_flush_host_tlb(1);
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc,
+                        struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+       }
+
+       arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+       kvm_change_c0_guest_cause(cop0, (0xff),
+                                 (T_COP_UNUSABLE << CAUSEB_EXCCODE));
+       kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc,
+                       struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
+
+               kvm_change_c0_guest_cause(cop0, (0xff),
+                                         (T_RES_INST << CAUSEB_EXCCODE));
+
+               /* Set PC to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+       } else {
+               kvm_err("Trying to deliver RI when EXL is already set\n");
+               er = EMULATE_FAIL;
+       }
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc,
+                       struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
+
+               kvm_change_c0_guest_cause(cop0, (0xff),
+                                         (T_BREAK << CAUSEB_EXCCODE));
+
+               /* Set PC to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+       } else {
+               printk("Trying to deliver BP when EXL is already set\n");
+               er = EMULATE_FAIL;
+       }
+
+       return er;
+}
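
The kvm_mips_emulate_syscall/tlbmiss/tlbinv/tlbmod/ri/bp helpers above all follow the same delivery pattern: save the old PC into the guest EPC, set Status.EXL, mirror the branch-delay state into Cause.BD, write the exception code into Cause, and point the PC at the guest exception vector. The user-space model below sketches that flow; the structure, the constant values and the GUEST_KSEG0 base are simplified stand-ins for the kernel's vcpu/CP0 state, not the real definitions.

/* exc_delivery.c -- minimal model of the guest exception delivery
 * pattern repeated by the kvm_mips_emulate_*() helpers above. */
#include <stdint.h>
#include <stdio.h>

#define ST0_EXL         (1u << 1)
#define CAUSEF_BD       (1u << 31)
#define CAUSEB_EXCCODE  2
#define GUEST_KSEG0     0x40000000ul    /* stand-in for KVM_GUEST_KSEG0 */

struct guest_cp0 {
        uint32_t status, cause;
        unsigned long epc, pc;
};

/* Deliver exception 'exccode'; 'in_bd' mirrors the CAUSEF_BD test. */
static void deliver_guest_exc(struct guest_cp0 *c, int exccode, int in_bd)
{
        if (!(c->status & ST0_EXL)) {
                c->epc = c->pc;                 /* save old pc       */
                c->status |= ST0_EXL;           /* enter exception   */
                if (in_bd)
                        c->cause |= CAUSEF_BD;
                else
                        c->cause &= ~CAUSEF_BD;
        }
        c->cause = (c->cause & ~0xffu) | (exccode << CAUSEB_EXCCODE);
        c->pc = GUEST_KSEG0 + 0x180;    /* general exception vector */
}

int main(void)
{
        struct guest_cp0 c = { .pc = 0x40001000 };

        deliver_guest_exc(&c, 8 /* SYSCALL */, 0);
        printf("epc=%#lx pc=%#lx status=%#x cause=%#x\n",
               c.epc, c.pc, c.status, c.cause);
        return 0;
}
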
+
+/*
+ * ll/sc, rdhwr, sync emulation
+ */
+
+#define OPCODE 0xfc000000
+#define BASE   0x03e00000
+#define RT     0x001f0000
+#define OFFSET 0x0000ffff
+#define LL     0xc0000000
+#define SC     0xe0000000
+#define SPEC0  0x00000000
+#define SPEC3  0x7c000000
+#define RD     0x0000f800
+#define FUNC   0x0000003f
+#define SYNC   0x0000000f
+#define RDHWR  0x0000003b
+
+enum emulation_result
+kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
+                  struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+       unsigned long curr_pc;
+       uint32_t inst;
+
+       /*
+        * Update PC and hold onto current PC in case there is
+        * an error and we want to rollback the PC
+        */
+       curr_pc = vcpu->arch.pc;
+       er = update_pc(vcpu, cause);
+       if (er == EMULATE_FAIL)
+               return er;
+
+       /*
+        *  Fetch the instruction.
+        */
+       if (cause & CAUSEF_BD)
+               opc += 1;
+
+       inst = kvm_get_inst(opc, vcpu);
+
+       if (inst == KVM_INVALID_INST) {
+               printk("%s: Cannot get inst @ %p\n", __func__, opc);
+               return EMULATE_FAIL;
+       }
+
+       if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
+               int rd = (inst & RD) >> 11;
+               int rt = (inst & RT) >> 16;
+               switch (rd) {
+               case 0: /* CPU number */
+                       arch->gprs[rt] = 0;
+                       break;
+               case 1: /* SYNCI length */
+                       arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
+                                            current_cpu_data.icache.linesz);
+                       break;
+               case 2: /* Read count register */
+                       printk("RDHWR: Count register\n");
+                       arch->gprs[rt] = kvm_read_c0_guest_count(cop0);
+                       break;
+               case 3: /* Count register resolution */
+                       switch (current_cpu_data.cputype) {
+                       case CPU_20KC:
+                       case CPU_25KF:
+                               arch->gprs[rt] = 1;
+                               break;
+                       default:
+                               arch->gprs[rt] = 2;
+                       }
+                       break;
+               case 29:
+#if 1
+                       arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
+#else
+                       /* UserLocal not implemented */
+                       er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
+#endif
+                       break;
+
+               default:
+                       printk("RDHWR not supported\n");
+                       er = EMULATE_FAIL;
+                       break;
+               }
+       } else {
+               printk("Emulate RI not supported @ %p: %#x\n", opc, inst);
+               er = EMULATE_FAIL;
+       }
+
+       /*
+        * Rollback PC only if emulation was unsuccessful
+        */
+       if (er == EMULATE_FAIL) {
+               vcpu->arch.pc = curr_pc;
+       }
+       return er;
+}
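
kvm_mips_handle_ri() recognises RDHWR through the SPEC3 opcode and RDHWR function code defined just above it, then routes on the hardware register number in the rd field. The sketch below applies the same masks to a sample word; the particular encoding (rdhwr $3, $29, the usual thread-pointer read) is only an illustration.

/* rdhwr_decode.c -- sketch of the SPEC3/RDHWR test performed by
 * kvm_mips_handle_ri() above, using the same field masks. */
#include <stdint.h>
#include <stdio.h>

#define OPCODE  0xfc000000
#define RT      0x001f0000
#define RD      0x0000f800
#define FUNC    0x0000003f
#define SPEC3   0x7c000000
#define RDHWR   0x0000003b

int main(void)
{
        uint32_t inst = 0x7c03e83b;     /* rdhwr $3, $29 (UserLocal) */

        if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
                int rd = (inst & RD) >> 11;     /* hardware register */
                int rt = (inst & RT) >> 16;     /* destination GPR   */
                printf("RDHWR: hw reg %d -> gpr %d\n", rd, rt);
        } else {
                printf("not an RDHWR instruction\n");
        }
        return 0;
}
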
+
+enum emulation_result
+kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
+       enum emulation_result er = EMULATE_DONE;
+       unsigned long curr_pc;
+
+       if (run->mmio.len > sizeof(*gpr)) {
+               printk("Bad MMIO length: %d\n", run->mmio.len);
+               er = EMULATE_FAIL;
+               goto done;
+       }
+
+       /*
+        * Update PC and hold onto current PC in case there is
+        * an error and we want to rollback the PC
+        */
+       curr_pc = vcpu->arch.pc;
+       er = update_pc(vcpu, vcpu->arch.pending_load_cause);
+       if (er == EMULATE_FAIL)
+               return er;
+
+       switch (run->mmio.len) {
+       case 4:
+               *gpr = *(int32_t *) run->mmio.data;
+               break;
+
+       case 2:
+               if (vcpu->mmio_needed == 2)
+                       *gpr = *(int16_t *) run->mmio.data;
+               else
+                       *gpr = *(uint16_t *) run->mmio.data;
+
+               break;
+       case 1:
+               if (vcpu->mmio_needed == 2)
+                       *gpr = *(int8_t *) run->mmio.data;
+               else
+                       *gpr = *(u8 *) run->mmio.data;
+               break;
+       }
+
+       if (vcpu->arch.pending_load_cause & CAUSEF_BD)
+               kvm_debug
+                   ("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
+                    vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
+                    vcpu->mmio_needed);
+
+done:
+       return er;
+}
+
+static enum emulation_result
+kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc,
+                    struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_change_c0_guest_cause(cop0, (0xff),
+                                         (exccode << CAUSEB_EXCCODE));
+
+               /* Set PC to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+               kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+
+               kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
+                         exccode, kvm_read_c0_guest_epc(cop0),
+                         kvm_read_c0_guest_badvaddr(cop0));
+       } else {
+               printk("Trying to deliver EXC when EXL is already set\n");
+               er = EMULATE_FAIL;
+       }
+
+       return er;
+}
+
+enum emulation_result
+kvm_mips_check_privilege(unsigned long cause, uint32_t *opc,
+                        struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DONE;
+       uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+
+       int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
+
+       if (usermode) {
+               switch (exccode) {
+               case T_INT:
+               case T_SYSCALL:
+               case T_BREAK:
+               case T_RES_INST:
+                       break;
+
+               case T_COP_UNUSABLE:
+                       if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
+                               er = EMULATE_PRIV_FAIL;
+                       break;
+
+               case T_TLB_MOD:
+                       break;
+
+               case T_TLB_LD_MISS:
+                       /* If we are accessing guest kernel space, then send an address error exception to the guest */
+                       if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
+                               printk("%s: LD MISS @ %#lx\n", __func__,
+                                      badvaddr);
+                               cause &= ~0xff;
+                               cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
+                               er = EMULATE_PRIV_FAIL;
+                       }
+                       break;
+
+               case T_TLB_ST_MISS:
+                       /* If we are accessing guest kernel space, then send an address error exception to the guest */
+                       if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
+                               printk("%s: ST MISS @ %#lx\n", __func__,
+                                      badvaddr);
+                               cause &= ~0xff;
+                               cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
+                               er = EMULATE_PRIV_FAIL;
+                       }
+                       break;
+
+               case T_ADDR_ERR_ST:
+                       printk("%s: address error ST @ %#lx\n", __func__,
+                              badvaddr);
+                       if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
+                               cause &= ~0xff;
+                               cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
+                       }
+                       er = EMULATE_PRIV_FAIL;
+                       break;
+               case T_ADDR_ERR_LD:
+                       printk("%s: address error LD @ %#lx\n", __func__,
+                              badvaddr);
+                       if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
+                               cause &= ~0xff;
+                               cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
+                       }
+                       er = EMULATE_PRIV_FAIL;
+                       break;
+               default:
+                       er = EMULATE_PRIV_FAIL;
+                       break;
+               }
+       }
+
+       if (er == EMULATE_PRIV_FAIL) {
+               kvm_mips_emulate_exc(cause, opc, run, vcpu);
+       }
+       return er;
+}
+
+/* User Address (UA) fault; this could happen if
+ * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
+ *     case we pass on the fault to the guest kernel and let it handle it.
+ * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
+ *     case we inject the TLB from the Guest TLB into the shadow host TLB
+ */
+enum emulation_result
+kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
+                       struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DONE;
+       uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+       unsigned long va = vcpu->arch.host_cp0_badvaddr;
+       int index;
+
+       kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
+                 vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);
+
+       /* KVM would not have got the exception if this entry was valid in the
+        * shadow host TLB. Check the guest TLB; if the entry is not there then
+        * send the guest an exception. The guest exception handler should then
+        * inject an entry into the guest TLB.
+        */
+       index = kvm_mips_guest_tlb_lookup(vcpu,
+                                         (va & VPN2_MASK) |
+                                         ASID_MASK(kvm_read_c0_guest_entryhi
+                                          (vcpu->arch.cop0)));
+       if (index < 0) {
+               if (exccode == T_TLB_LD_MISS) {
+                       er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
+               } else if (exccode == T_TLB_ST_MISS) {
+                       er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
+               } else {
+                       printk("%s: invalid exc code: %d\n", __func__, exccode);
+                       er = EMULATE_FAIL;
+               }
+       } else {
+               struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
+
+               /* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */
+               if (!TLB_IS_VALID(*tlb, va)) {
+                       if (exccode == T_TLB_LD_MISS) {
+                               er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
+                                                               vcpu);
+                       } else if (exccode == T_TLB_ST_MISS) {
+                               er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
+                                                               vcpu);
+                       } else {
+                               printk("%s: invalid exc code: %d\n", __func__,
+                                      exccode);
+                               er = EMULATE_FAIL;
+                       }
+               } else {
+#ifdef DEBUG
+                       kvm_debug
+                           ("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
+                            tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
+#endif
+                       /* OK we have a Guest TLB entry, now inject it into the shadow host TLB */
+                       kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
+                                                            NULL);
+               }
+       }
+
+       return er;
+}
diff --git a/arch/mips/kvm/kvm_mips_int.c b/arch/mips/kvm/kvm_mips_int.c
new file mode 100644 (file)
index 0000000..1e5de16
--- /dev/null
@@ -0,0 +1,243 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: Interrupt delivery
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+
+#include <linux/kvm_host.h>
+
+#include "kvm_mips_int.h"
+
+void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
+{
+       set_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
+{
+       clear_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu)
+{
+       /* Set the Cause bits to reflect the pending timer interrupt;
+        * the EXC code will be set when we are actually
+        * delivering the interrupt:
+        */
+       kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
+
+       /* Queue up an INT exception for the core */
+       kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
+
+}
+
+void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
+{
+       kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
+       kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
+}
+
+void
+kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
+{
+       int intr = (int)irq->irq;
+
+       /* Set the Cause bits to reflect the pending IO interrupt;
+        * the EXC code will be set when we are actually
+        * delivering the interrupt:
+        */
+       switch (intr) {
+       case 2:
+               kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
+               /* Queue up an INT exception for the core */
+               kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IO);
+               break;
+
+       case 3:
+               kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
+               kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
+               break;
+
+       case 4:
+               kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
+               kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
+               break;
+
+       default:
+               break;
+       }
+
+}
+
+void
+kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
+                          struct kvm_mips_interrupt *irq)
+{
+       int intr = (int)irq->irq;
+       switch (intr) {
+       case -2:
+               kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
+               kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
+               break;
+
+       case -3:
+               kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
+               kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
+               break;
+
+       case -4:
+               kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
+               kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
+               break;
+
+       default:
+               break;
+       }
+
+}
+
+/* Deliver the interrupt of the corresponding priority, if possible. */
+int
+kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+                       uint32_t cause)
+{
+       int allowed = 0;
+       uint32_t exccode;
+
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+       switch (priority) {
+       case MIPS_EXC_INT_TIMER:
+               if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+                   && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+                   && (kvm_read_c0_guest_status(cop0) & IE_IRQ5)) {
+                       allowed = 1;
+                       exccode = T_INT;
+               }
+               break;
+
+       case MIPS_EXC_INT_IO:
+               if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+                   && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+                   && (kvm_read_c0_guest_status(cop0) & IE_IRQ0)) {
+                       allowed = 1;
+                       exccode = T_INT;
+               }
+               break;
+
+       case MIPS_EXC_INT_IPI_1:
+               if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+                   && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+                   && (kvm_read_c0_guest_status(cop0) & IE_IRQ1)) {
+                       allowed = 1;
+                       exccode = T_INT;
+               }
+               break;
+
+       case MIPS_EXC_INT_IPI_2:
+               if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+                   && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+                   && (kvm_read_c0_guest_status(cop0) & IE_IRQ2)) {
+                       allowed = 1;
+                       exccode = T_INT;
+               }
+               break;
+
+       default:
+               break;
+       }
+
+       /* Are we allowed to deliver the interrupt ??? */
+       if (allowed) {
+
+               if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+                       /* save old pc */
+                       kvm_write_c0_guest_epc(cop0, arch->pc);
+                       kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+                       if (cause & CAUSEF_BD)
+                               kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+                       else
+                               kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+                       kvm_debug("Delivering INT @ pc %#lx\n", arch->pc);
+
+               } else
+                       kvm_err("Trying to deliver interrupt when EXL is already set\n");
+
+               kvm_change_c0_guest_cause(cop0, CAUSEF_EXCCODE,
+                                         (exccode << CAUSEB_EXCCODE));
+
+               /* XXXSL Set PC to the interrupt exception entry point */
+               if (kvm_read_c0_guest_cause(cop0) & CAUSEF_IV)
+                       arch->pc = KVM_GUEST_KSEG0 + 0x200;
+               else
+                       arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+               clear_bit(priority, &vcpu->arch.pending_exceptions);
+       }
+
+       return allowed;
+}
+
+int
+kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+                     uint32_t cause)
+{
+       return 1;
+}
+
+void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause)
+{
+       unsigned long *pending = &vcpu->arch.pending_exceptions;
+       unsigned long *pending_clr = &vcpu->arch.pending_exceptions_clr;
+       unsigned int priority;
+
+       if (!(*pending) && !(*pending_clr))
+               return;
+
+       priority = __ffs(*pending_clr);
+       while (priority <= MIPS_EXC_MAX) {
+               if (kvm_mips_callbacks->irq_clear(vcpu, priority, cause)) {
+                       if (!KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE)
+                               break;
+               }
+
+               priority = find_next_bit(pending_clr,
+                                        BITS_PER_BYTE * sizeof(*pending_clr),
+                                        priority + 1);
+       }
+
+       priority = __ffs(*pending);
+       while (priority <= MIPS_EXC_MAX) {
+               if (kvm_mips_callbacks->irq_deliver(vcpu, priority, cause)) {
+                       if (!KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE)
+                               break;
+               }
+
+               priority = find_next_bit(pending,
+                                        BITS_PER_BYTE * sizeof(*pending),
+                                        priority + 1);
+       }
+
+}
+
+int kvm_mips_pending_timer(struct kvm_vcpu *vcpu)
+{
+       return test_bit(MIPS_EXC_INT_TIMER, &vcpu->arch.pending_exceptions);
+}
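
kvm_mips_deliver_interrupts() above walks the pending-exception bitmask lowest-numbered bit first, delivering (or clearing) one priority at a time unless the *_ALL_AT_ONCE switches are set. The sketch below models that walk with __builtin_ctzl standing in for the kernel's __ffs()/find_next_bit() helpers; the bit positions used match the MIPS_EXC_INT_* values in kvm_mips_int.h.

/* pending_walk.c -- sketch of the priority walk in
 * kvm_mips_deliver_interrupts() above. */
#include <stdio.h>

#define MIPS_EXC_INT_TIMER      7       /* values from kvm_mips_int.h */
#define MIPS_EXC_INT_IO         8
#define MIPS_EXC_MAX            12

int main(void)
{
        unsigned long pending = (1ul << MIPS_EXC_INT_TIMER) |
                                (1ul << MIPS_EXC_INT_IO);

        while (pending) {
                /* lowest-numbered bit first, as __ffs() does */
                unsigned int priority = __builtin_ctzl(pending);

                if (priority > MIPS_EXC_MAX)
                        break;
                printf("deliver priority %u\n", priority);
                pending &= ~(1ul << priority);  /* delivery succeeded */
        }
        return 0;
}
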
diff --git a/arch/mips/kvm/kvm_mips_int.h b/arch/mips/kvm/kvm_mips_int.h
new file mode 100644 (file)
index 0000000..20da7d2
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: Interrupts
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+/* MIPS Exception Priorities: exceptions (including interrupts) are queued up
+ * for the guest in the order specified by their priorities
+ */
+
+#define MIPS_EXC_RESET              0
+#define MIPS_EXC_SRESET             1
+#define MIPS_EXC_DEBUG_ST           2
+#define MIPS_EXC_DEBUG              3
+#define MIPS_EXC_DDB                4
+#define MIPS_EXC_NMI                5
+#define MIPS_EXC_MCHK               6
+#define MIPS_EXC_INT_TIMER          7
+#define MIPS_EXC_INT_IO             8
+#define MIPS_EXC_EXECUTE            9
+#define MIPS_EXC_INT_IPI_1          10
+#define MIPS_EXC_INT_IPI_2          11
+#define MIPS_EXC_MAX                12
+/* XXXSL More to follow */
+
+#define C_TI        (_ULCAST_(1) << 30)
+
+#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)
+#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE   (0)
+
+void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
+void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
+int kvm_mips_pending_timer(struct kvm_vcpu *vcpu);
+
+void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu);
+void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu);
+void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
+                             struct kvm_mips_interrupt *irq);
+void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
+                               struct kvm_mips_interrupt *irq);
+int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+                           uint32_t cause);
+int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+                         uint32_t cause);
+void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause);
diff --git a/arch/mips/kvm/kvm_mips_opcode.h b/arch/mips/kvm/kvm_mips_opcode.h
new file mode 100644 (file)
index 0000000..86d3b4c
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+/*
+ * Define opcode values not defined in <asm/inst.h>
+ */
+
+#ifndef __KVM_MIPS_OPCODE_H__
+#define __KVM_MIPS_OPCODE_H__
+
+/* COP0 Ops */
+#define     mfmcz_op         0x0b      /*  01011  */
+#define     wrpgpr_op        0x0e      /*  01110  */
+
+/*  COP0 opcodes (only if COP0 and CO=1):  */
+#define     wait_op               0x20 /*  100000  */
+
+#endif /* __KVM_MIPS_OPCODE_H__ */
diff --git a/arch/mips/kvm/kvm_mips_stats.c b/arch/mips/kvm/kvm_mips_stats.c
new file mode 100644 (file)
index 0000000..075904b
--- /dev/null
@@ -0,0 +1,82 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: COP0 access histogram
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/kvm_host.h>
+
+char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES] = {
+       "WAIT",
+       "CACHE",
+       "Signal",
+       "Interrupt",
+       "COP0/1 Unusable",
+       "TLB Mod",
+       "TLB Miss (LD)",
+       "TLB Miss (ST)",
+       "Address Err (ST)",
+       "Address Error (LD)",
+       "System Call",
+       "Reserved Inst",
+       "Break Inst",
+       "D-Cache Flushes",
+};
+
+char *kvm_cop0_str[N_MIPS_COPROC_REGS] = {
+       "Index",
+       "Random",
+       "EntryLo0",
+       "EntryLo1",
+       "Context",
+       "PG Mask",
+       "Wired",
+       "HWREna",
+       "BadVAddr",
+       "Count",
+       "EntryHI",
+       "Compare",
+       "Status",
+       "Cause",
+       "EXC PC",
+       "PRID",
+       "Config",
+       "LLAddr",
+       "Watch Lo",
+       "Watch Hi",
+       "X Context",
+       "Reserved",
+       "Impl Dep",
+       "Debug",
+       "DEPC",
+       "PerfCnt",
+       "ErrCtl",
+       "CacheErr",
+       "TagLo",
+       "TagHi",
+       "ErrorEPC",
+       "DESAVE"
+};
+
+int kvm_mips_dump_stats(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+       int i, j;
+
+       printk("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id);
+       for (i = 0; i < N_MIPS_COPROC_REGS; i++) {
+               for (j = 0; j < N_MIPS_COPROC_SEL; j++) {
+                       if (vcpu->arch.cop0->stat[i][j])
+                               printk("%s[%d]: %lu\n", kvm_cop0_str[i], j,
+                                      vcpu->arch.cop0->stat[i][j]);
+               }
+       }
+#endif
+
+       return 0;
+}
diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/kvm_tlb.c
new file mode 100644 (file)
index 0000000..89511a9
--- /dev/null
@@ -0,0 +1,928 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS TLB handling; this file is part of the Linux host kernel so that
+* TLB handlers run from KSEG0
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/kvm_host.h>
+
+#include <asm/cpu.h>
+#include <asm/bootinfo.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
+
+#undef CONFIG_MIPS_MT
+#include <asm/r4kcache.h>
+#define CONFIG_MIPS_MT
+
+#define KVM_GUEST_PC_TLB    0
+#define KVM_GUEST_SP_TLB    1
+
+#define PRIx64 "llx"
+
+/* Invalidate TLB entries by loading a unique (unmapped KSEG0) EntryHi value per index */
+#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
+
+atomic_t kvm_mips_instance;
+EXPORT_SYMBOL(kvm_mips_instance);
+
+/* These function pointers are initialized once the KVM module is loaded */
+pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn);
+EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);
+
+void (*kvm_mips_release_pfn_clean) (pfn_t pfn);
+EXPORT_SYMBOL(kvm_mips_release_pfn_clean);
+
+bool(*kvm_mips_is_error_pfn) (pfn_t pfn);
+EXPORT_SYMBOL(kvm_mips_is_error_pfn);
+
+uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
+{
+       return ASID_MASK(vcpu->arch.guest_kernel_asid[smp_processor_id()]);
+}
+
+
+uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
+{
+       return ASID_MASK(vcpu->arch.guest_user_asid[smp_processor_id()]);
+}
+
+inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
+{
+       return vcpu->kvm->arch.commpage_tlb;
+}
+
+
+/*
+ * Structure defining a TLB entry data set.
+ */
+
+void kvm_mips_dump_host_tlbs(void)
+{
+       unsigned long old_entryhi;
+       unsigned long old_pagemask;
+       struct kvm_mips_tlb tlb;
+       unsigned long flags;
+       int i;
+
+       local_irq_save(flags);
+
+       old_entryhi = read_c0_entryhi();
+       old_pagemask = read_c0_pagemask();
+
+       printk("HOST TLBs:\n");
+       printk("ASID: %#lx\n", ASID_MASK(read_c0_entryhi()));
+
+       for (i = 0; i < current_cpu_data.tlbsize; i++) {
+               write_c0_index(i);
+               mtc0_tlbw_hazard();
+
+               tlb_read();
+               tlbw_use_hazard();
+
+               tlb.tlb_hi = read_c0_entryhi();
+               tlb.tlb_lo0 = read_c0_entrylo0();
+               tlb.tlb_lo1 = read_c0_entrylo1();
+               tlb.tlb_mask = read_c0_pagemask();
+
+               printk("TLB%c%3d Hi 0x%08lx ",
+                      (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
+                      i, tlb.tlb_hi);
+               printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
+                      (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
+                      (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
+                      (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
+                      (tlb.tlb_lo0 >> 3) & 7);
+               printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
+                      (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
+                      (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
+                      (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
+                      (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
+       }
+       write_c0_entryhi(old_entryhi);
+       write_c0_pagemask(old_pagemask);
+       mtc0_tlbw_hazard();
+       local_irq_restore(flags);
+}
+
+void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_mips_tlb tlb;
+       int i;
+
+       printk("Guest TLBs:\n");
+       printk("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));
+
+       for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
+               tlb = vcpu->arch.guest_tlb[i];
+               printk("TLB%c%3d Hi 0x%08lx ",
+                      (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
+                      i, tlb.tlb_hi);
+               printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
+                      (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
+                      (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
+                      (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
+                      (tlb.tlb_lo0 >> 3) & 7);
+               printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
+                      (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
+                      (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
+                      (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
+                      (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
+       }
+}
+
+void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu)
+{
+       int i;
+       volatile struct kvm_mips_tlb tlb;
+
+       printk("Shadow TLBs:\n");
+       for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
+               tlb = vcpu->arch.shadow_tlb[smp_processor_id()][i];
+               printk("TLB%c%3d Hi 0x%08lx ",
+                      (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
+                      i, tlb.tlb_hi);
+               printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
+                      (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
+                      (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
+                      (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
+                      (tlb.tlb_lo0 >> 3) & 7);
+               printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
+                      (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
+                      (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
+                      (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
+                      (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
+       }
+}
+
+static void kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
+{
+       pfn_t pfn;
+
+       if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
+               return;
+
+       pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
+
+       if (kvm_mips_is_error_pfn(pfn)) {
+               panic("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
+       }
+
+       kvm->arch.guest_pmap[gfn] = pfn;
+       return;
+}
+
+/* Translate guest KSEG0 addresses to Host PA */
+unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
+       unsigned long gva)
+{
+       gfn_t gfn;
+       uint32_t offset = gva & ~PAGE_MASK;
+       struct kvm *kvm = vcpu->kvm;
+
+       if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
+               kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
+                       __builtin_return_address(0), gva);
+               return KVM_INVALID_PAGE;
+       }
+
+       gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);
+
+       if (gfn >= kvm->arch.guest_pmap_npages) {
+               kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
+                       gva);
+               return KVM_INVALID_PAGE;
+       }
+       kvm_mips_map_page(vcpu->kvm, gfn);
+       return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
+}
+
+/* XXXKYMA: Must be called with interrupts disabled */
+/* set flush_dcache_mask == 0 if no dcache flush required */
+int
+kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
+       unsigned long entrylo0, unsigned long entrylo1, int flush_dcache_mask)
+{
+       unsigned long flags;
+       unsigned long old_entryhi;
+       volatile int idx;
+
+       local_irq_save(flags);
+
+
+       old_entryhi = read_c0_entryhi();
+       write_c0_entryhi(entryhi);
+       mtc0_tlbw_hazard();
+
+       tlb_probe();
+       tlb_probe_hazard();
+       idx = read_c0_index();
+
+       if (idx > current_cpu_data.tlbsize) {
+               kvm_err("%s: Invalid Index: %d\n", __func__, idx);
+               kvm_mips_dump_host_tlbs();
+               return -1;
+       }
+
+       if (idx < 0) {
+               idx = read_c0_random() % current_cpu_data.tlbsize;
+               write_c0_index(idx);
+               mtc0_tlbw_hazard();
+       }
+       write_c0_entrylo0(entrylo0);
+       write_c0_entrylo1(entrylo1);
+       mtc0_tlbw_hazard();
+
+       tlb_write_indexed();
+       tlbw_use_hazard();
+
+#ifdef DEBUG
+       kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] "
+                 "entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
+                 vcpu->arch.pc, idx, read_c0_entryhi(),
+                 read_c0_entrylo0(), read_c0_entrylo1());
+#endif
+
+       /* Flush D-cache */
+       if (flush_dcache_mask) {
+               if (entrylo0 & MIPS3_PG_V) {
+                       ++vcpu->stat.flush_dcache_exits;
+                       flush_data_cache_page((entryhi & VPN2_MASK) & ~flush_dcache_mask);
+               }
+               if (entrylo1 & MIPS3_PG_V) {
+                       ++vcpu->stat.flush_dcache_exits;
+                       flush_data_cache_page(((entryhi & VPN2_MASK) & ~flush_dcache_mask) |
+                               (0x1 << PAGE_SHIFT));
+               }
+       }
+
+       /* Restore old ASID */
+       write_c0_entryhi(old_entryhi);
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+       local_irq_restore(flags);
+       return 0;
+}
+
+
+/* XXXKYMA: Must be called with interrupts disabled */
+int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
+       struct kvm_vcpu *vcpu)
+{
+       gfn_t gfn;
+       pfn_t pfn0, pfn1;
+       unsigned long vaddr = 0;
+       unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
+       int even;
+       struct kvm *kvm = vcpu->kvm;
+       const int flush_dcache_mask = 0;
+
+
+       if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
+               kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
+               kvm_mips_dump_host_tlbs();
+               return -1;
+       }
+
+       gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
+       if (gfn >= kvm->arch.guest_pmap_npages) {
+               kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
+                       gfn, badvaddr);
+               kvm_mips_dump_host_tlbs();
+               return -1;
+       }
+       even = !(gfn & 0x1);
+       vaddr = badvaddr & (PAGE_MASK << 1);
+
+       kvm_mips_map_page(vcpu->kvm, gfn);
+       kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1);
+
+       if (even) {
+               pfn0 = kvm->arch.guest_pmap[gfn];
+               pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
+       } else {
+               pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
+               pfn1 = kvm->arch.guest_pmap[gfn];
+       }
+
+       entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
+       entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
+                       (0x1 << 1);
+       entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
+                       (0x1 << 1);
+
+       return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
+                                      flush_dcache_mask);
+}
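
kvm_mips_handle_kseg0_tlb_fault() above builds the two EntryLo words from the mapped pfns, a cache attribute of 3, and the Dirty and Valid bits. The sketch below assembles the same bit layout for a single pfn; the PAGE_SHIFT value and the modelling of mips3_paddr_to_tlbpfn() as paddr >> 6 assume 4 KiB pages and are simplifications made for the example.

/* entrylo.c -- sketch of the EntryLo assembly done above: pfn field,
 * cache attribute 3 (cacheable, noncoherent), Dirty and Valid bits. */
#include <stdio.h>

#define PAGE_SHIFT      12
#define ENTRYLO_C(attr) ((unsigned long)(attr) << 3)    /* cache attribute */
#define ENTRYLO_D       (1ul << 2)                      /* dirty/writable  */
#define ENTRYLO_V       (1ul << 1)                      /* valid           */

static unsigned long make_entrylo(unsigned long pfn)
{
        /* The PFN field starts at bit 6, so with 4 KiB pages the
         * physical address ends up shifted right by 6. */
        return ((pfn << PAGE_SHIFT) >> 6) | ENTRYLO_C(3) | ENTRYLO_D | ENTRYLO_V;
}

int main(void)
{
        printf("entrylo for pfn 0x1234: %#lx\n", make_entrylo(0x1234));
        return 0;
}
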
+
+int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
+       struct kvm_vcpu *vcpu)
+{
+       pfn_t pfn0, pfn1;
+       unsigned long flags, old_entryhi = 0, vaddr = 0;
+       unsigned long entrylo0 = 0, entrylo1 = 0;
+
+
+       pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
+       pfn1 = 0;
+       entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
+                       (0x1 << 1);
+       entrylo1 = 0;
+
+       local_irq_save(flags);
+
+       old_entryhi = read_c0_entryhi();
+       vaddr = badvaddr & (PAGE_MASK << 1);
+       write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
+       mtc0_tlbw_hazard();
+       write_c0_entrylo0(entrylo0);
+       mtc0_tlbw_hazard();
+       write_c0_entrylo1(entrylo1);
+       mtc0_tlbw_hazard();
+       write_c0_index(kvm_mips_get_commpage_asid(vcpu));
+       mtc0_tlbw_hazard();
+       tlb_write_indexed();
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+
+#ifdef DEBUG
+       kvm_debug ("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
+            vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
+            read_c0_entrylo0(), read_c0_entrylo1());
+#endif
+
+       /* Restore old ASID */
+       write_c0_entryhi(old_entryhi);
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+       local_irq_restore(flags);
+
+       return 0;
+}
+
+int
+kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
+       struct kvm_mips_tlb *tlb, unsigned long *hpa0, unsigned long *hpa1)
+{
+       unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
+       struct kvm *kvm = vcpu->kvm;
+       pfn_t pfn0, pfn1;
+
+
+       if ((tlb->tlb_hi & VPN2_MASK) == 0) {
+               pfn0 = 0;
+               pfn1 = 0;
+       } else {
+               kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT);
+               kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT);
+
+               pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT];
+               pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT];
+       }
+
+       if (hpa0)
+               *hpa0 = pfn0 << PAGE_SHIFT;
+
+       if (hpa1)
+               *hpa1 = pfn1 << PAGE_SHIFT;
+
+       /* Get attributes from the Guest TLB */
+       entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
+                       kvm_mips_get_kernel_asid(vcpu) : kvm_mips_get_user_asid(vcpu));
+       entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
+                       (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
+       entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
+                       (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
+
+#ifdef DEBUG
+       kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
+                 tlb->tlb_lo0, tlb->tlb_lo1);
+#endif
+
+       return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
+                                      tlb->tlb_mask);
+}
+
+int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
+{
+       int i;
+       int index = -1;
+       struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;
+
+
+       for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
+               if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
+                       (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == ASID_MASK(entryhi)))) {
+                       index = i;
+                       break;
+               }
+       }
+
+#ifdef DEBUG
+       kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
+                 __func__, entryhi, index, tlb[i].tlb_lo0, tlb[i].tlb_lo1);
+#endif
+
+       return index;
+}
+
+int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
+{
+       unsigned long old_entryhi, flags;
+       volatile int idx;
+
+
+       local_irq_save(flags);
+
+       old_entryhi = read_c0_entryhi();
+
+       if (KVM_GUEST_KERNEL_MODE(vcpu))
+               write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_kernel_asid(vcpu));
+       else {
+               write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
+       }
+
+       mtc0_tlbw_hazard();
+
+       tlb_probe();
+       tlb_probe_hazard();
+       idx = read_c0_index();
+
+       /* Restore old ASID */
+       write_c0_entryhi(old_entryhi);
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+
+       local_irq_restore(flags);
+
+#ifdef DEBUG
+       kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);
+#endif
+
+       return idx;
+}
+
+int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
+{
+       int idx;
+       unsigned long flags, old_entryhi;
+
+       local_irq_save(flags);
+
+
+       old_entryhi = read_c0_entryhi();
+
+       write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
+       mtc0_tlbw_hazard();
+
+       tlb_probe();
+       tlb_probe_hazard();
+       idx = read_c0_index();
+
+       if (idx >= current_cpu_data.tlbsize)
+               BUG();
+
+       if (idx > 0) {
+               write_c0_entryhi(UNIQUE_ENTRYHI(idx));
+               mtc0_tlbw_hazard();
+
+               write_c0_entrylo0(0);
+               mtc0_tlbw_hazard();
+
+               write_c0_entrylo1(0);
+               mtc0_tlbw_hazard();
+
+               tlb_write_indexed();
+               mtc0_tlbw_hazard();
+       }
+
+       write_c0_entryhi(old_entryhi);
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+
+       local_irq_restore(flags);
+
+#ifdef DEBUG
+       if (idx > 0) {
+               kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
+                         (va & VPN2_MASK) | (vcpu->arch.asid_map[va & ASID_MASK] & ASID_MASK), idx);
+       }
+#endif
+
+       return 0;
+}
+
+/* XXXKYMA: Fix this once Guest USER/KERNEL no longer share the same ASID */
+int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
+{
+       unsigned long flags, old_entryhi;
+
+       if (index >= current_cpu_data.tlbsize)
+               BUG();
+
+       local_irq_save(flags);
+
+
+       old_entryhi = read_c0_entryhi();
+
+       write_c0_entryhi(UNIQUE_ENTRYHI(index));
+       mtc0_tlbw_hazard();
+
+       write_c0_index(index);
+       mtc0_tlbw_hazard();
+
+       write_c0_entrylo0(0);
+       mtc0_tlbw_hazard();
+
+       write_c0_entrylo1(0);
+       mtc0_tlbw_hazard();
+
+       tlb_write_indexed();
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+
+       write_c0_entryhi(old_entryhi);
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+
+       local_irq_restore(flags);
+
+       return 0;
+}
+
+void kvm_mips_flush_host_tlb(int skip_kseg0)
+{
+       unsigned long flags;
+       unsigned long old_entryhi, entryhi;
+       unsigned long old_pagemask;
+       int entry = 0;
+       int maxentry = current_cpu_data.tlbsize;
+
+
+       local_irq_save(flags);
+
+       old_entryhi = read_c0_entryhi();
+       old_pagemask = read_c0_pagemask();
+
+       /* Blast 'em all away. */
+       for (entry = 0; entry < maxentry; entry++) {
+
+               write_c0_index(entry);
+               mtc0_tlbw_hazard();
+
+               if (skip_kseg0) {
+                       tlb_read();
+                       tlbw_use_hazard();
+
+                       entryhi = read_c0_entryhi();
+
+                       /* Don't blow away guest kernel entries */
+                       if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0) {
+                               continue;
+                       }
+               }
+
+               /* Make sure all entries differ. */
+               write_c0_entryhi(UNIQUE_ENTRYHI(entry));
+               mtc0_tlbw_hazard();
+               write_c0_entrylo0(0);
+               mtc0_tlbw_hazard();
+               write_c0_entrylo1(0);
+               mtc0_tlbw_hazard();
+
+               tlb_write_indexed();
+               mtc0_tlbw_hazard();
+       }
+
+       tlbw_use_hazard();
+
+       write_c0_entryhi(old_entryhi);
+       write_c0_pagemask(old_pagemask);
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+
+       local_irq_restore(flags);
+}
+
+void
+kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
+                       struct kvm_vcpu *vcpu)
+{
+       unsigned long asid = asid_cache(cpu);
+
+       if (!(ASID_MASK(ASID_INC(asid)))) {
+               if (cpu_has_vtag_icache) {
+                       flush_icache_all();
+               }
+
+               kvm_local_flush_tlb_all();      /* start new asid cycle */
+
+               if (!asid)      /* fix version if needed */
+                       asid = ASID_FIRST_VERSION;
+       }
+
+       cpu_context(cpu, mm) = asid_cache(cpu) = asid;
+}
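
kvm_get_new_mmu_context() mirrors the long-standing generic MIPS get_new_mmu_context() scheme: the low bits of asid_cache(cpu) are the hardware ASID and the upper bits act as a generation counter, so a wrap of the low bits forces a full TLB flush (and an I-cache flush on virtually tagged caches) before ASIDs are reused. Roughly, written against the pre-rework constant-mask macros rather than the function-style ASID_MASK()/ASID_INC() used above:

static void get_new_mmu_context_sketch(struct mm_struct *mm, unsigned long cpu)
{
        unsigned long asid = asid_cache(cpu);

        if (!((asid += ASID_INC) & ASID_MASK)) {        /* hardware ASID wrapped */
                if (cpu_has_vtag_icache)
                        flush_icache_all();
                local_flush_tlb_all();                  /* start a new ASID cycle */
                if (!asid)                              /* generation counter wrapped too */
                        asid = ASID_FIRST_VERSION;
        }
        cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
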
+
+void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu)
+{
+       unsigned long flags;
+       unsigned long old_entryhi;
+       unsigned long old_pagemask;
+       int entry = 0;
+       int cpu = smp_processor_id();
+
+       local_irq_save(flags);
+
+       old_entryhi = read_c0_entryhi();
+       old_pagemask = read_c0_pagemask();
+
+       for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
+               write_c0_index(entry);
+               mtc0_tlbw_hazard();
+               tlb_read();
+               tlbw_use_hazard();
+
+               vcpu->arch.shadow_tlb[cpu][entry].tlb_hi = read_c0_entryhi();
+               vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = read_c0_entrylo0();
+               vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = read_c0_entrylo1();
+               vcpu->arch.shadow_tlb[cpu][entry].tlb_mask = read_c0_pagemask();
+       }
+
+       write_c0_entryhi(old_entryhi);
+       write_c0_pagemask(old_pagemask);
+       mtc0_tlbw_hazard();
+
+       local_irq_restore(flags);
+
+}
+
+void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu)
+{
+       unsigned long flags;
+       unsigned long old_ctx;
+       int entry;
+       int cpu = smp_processor_id();
+
+       local_irq_save(flags);
+
+       old_ctx = read_c0_entryhi();
+
+       for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
+               write_c0_entryhi(vcpu->arch.shadow_tlb[cpu][entry].tlb_hi);
+               mtc0_tlbw_hazard();
+               write_c0_entrylo0(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0);
+               write_c0_entrylo1(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);
+
+               write_c0_index(entry);
+               mtc0_tlbw_hazard();
+
+               tlb_write_indexed();
+               tlbw_use_hazard();
+       }
+
+       tlbw_use_hazard();
+       write_c0_entryhi(old_ctx);
+       mtc0_tlbw_hazard();
+       local_irq_restore(flags);
+}
+
+
+void kvm_local_flush_tlb_all(void)
+{
+       unsigned long flags;
+       unsigned long old_ctx;
+       int entry = 0;
+
+       local_irq_save(flags);
+       /* Save old context and create impossible VPN2 value */
+       old_ctx = read_c0_entryhi();
+       write_c0_entrylo0(0);
+       write_c0_entrylo1(0);
+
+       /* Blast 'em all away. */
+       while (entry < current_cpu_data.tlbsize) {
+               /* Make sure all entries differ. */
+               write_c0_entryhi(UNIQUE_ENTRYHI(entry));
+               write_c0_index(entry);
+               mtc0_tlbw_hazard();
+               tlb_write_indexed();
+               entry++;
+       }
+       tlbw_use_hazard();
+       write_c0_entryhi(old_ctx);
+       mtc0_tlbw_hazard();
+
+       local_irq_restore(flags);
+}
+
+void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu)
+{
+       int cpu, entry;
+
+       for_each_possible_cpu(cpu) {
+               for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
+                       vcpu->arch.shadow_tlb[cpu][entry].tlb_hi =
+                           UNIQUE_ENTRYHI(entry);
+                       vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = 0x0;
+                       vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = 0x0;
+                       vcpu->arch.shadow_tlb[cpu][entry].tlb_mask =
+                           read_c0_pagemask();
+#ifdef DEBUG
+                       kvm_debug
+                           ("shadow_tlb[%d][%d]: tlb_hi: %#lx, lo0: %#lx, lo1: %#lx\n",
+                            cpu, entry,
+                            vcpu->arch.shadow_tlb[cpu][entry].tlb_hi,
+                            vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0,
+                            vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);
+#endif
+               }
+       }
+}
+
+/* Restore ASID once we are scheduled back after preemption */
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+       unsigned long flags;
+       int newasid = 0;
+
+#ifdef DEBUG
+       kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
+#endif
+
+       /* Allocate new kernel and user ASIDs if needed */
+
+       local_irq_save(flags);
+
+       if (((vcpu->arch.
+             guest_kernel_asid[cpu] ^ asid_cache(cpu)) & ASID_VERSION_MASK)) {
+               kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
+               vcpu->arch.guest_kernel_asid[cpu] =
+                   vcpu->arch.guest_kernel_mm.context.asid[cpu];
+               kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
+               vcpu->arch.guest_user_asid[cpu] =
+                   vcpu->arch.guest_user_mm.context.asid[cpu];
+               newasid++;
+
+               kvm_info("[%d]: cpu_context: %#lx\n", cpu,
+                        cpu_context(cpu, current->mm));
+               kvm_info("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
+                        cpu, vcpu->arch.guest_kernel_asid[cpu]);
+               kvm_info("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
+                        vcpu->arch.guest_user_asid[cpu]);
+       }
+
+       if (vcpu->arch.last_sched_cpu != cpu) {
+               kvm_info("[%d->%d]KVM VCPU[%d] switch\n",
+                        vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
+       }
+
+       /* Only reload shadow host TLB if new ASIDs haven't been allocated */
+#if 0
+       if ((atomic_read(&kvm_mips_instance) > 1) && !newasid) {
+               kvm_mips_flush_host_tlb(0);
+               kvm_shadow_tlb_load(vcpu);
+       }
+#endif
+
+       if (!newasid) {
+               /* If we were preempted while the guest was executing, then reload the pre-empted ASID */
+               if (current->flags & PF_VCPU) {
+                       write_c0_entryhi(ASID_MASK(vcpu->arch.preempt_entryhi));
+                       ehb();
+               }
+       } else {
+               /* New ASIDs were allocated for the VM */
+
+               /* Were we in guest context? If so, the pre-empted ASID is no
+                * longer valid; set it to what it should be based on the mode
+                * of the Guest (Kernel/User).
+                */
+               if (current->flags & PF_VCPU) {
+                       if (KVM_GUEST_KERNEL_MODE(vcpu))
+                               write_c0_entryhi(ASID_MASK(vcpu->arch.
+                                                guest_kernel_asid[cpu]));
+                       else
+                               write_c0_entryhi(ASID_MASK(vcpu->arch.
+                                                guest_user_asid[cpu]));
+                       ehb();
+               }
+       }
+
+       local_irq_restore(flags);
+
+}
+
+/* ASID can change if another task is scheduled during preemption */
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+{
+       unsigned long flags;
+       uint32_t cpu;
+
+       local_irq_save(flags);
+
+       cpu = smp_processor_id();
+
+
+       vcpu->arch.preempt_entryhi = read_c0_entryhi();
+       vcpu->arch.last_sched_cpu = cpu;
+
+#if 0
+       if ((atomic_read(&kvm_mips_instance) > 1)) {
+               kvm_shadow_tlb_put(vcpu);
+       }
+#endif
+
+       if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
+            ASID_VERSION_MASK)) {
+               kvm_debug("%s: Dropping MMU Context:  %#lx\n", __func__,
+                         cpu_context(cpu, current->mm));
+               drop_mmu_context(current->mm, cpu);
+       }
+       write_c0_entryhi(cpu_asid(cpu, current->mm));
+       ehb();
+
+       local_irq_restore(flags);
+}
+
+uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       unsigned long paddr, flags;
+       uint32_t inst;
+       int index;
+
+       if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
+           KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
+               local_irq_save(flags);
+               index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
+               if (index >= 0) {
+                       inst = *(opc);
+               } else {
+                       index =
+                           kvm_mips_guest_tlb_lookup(vcpu,
+                                                     ((unsigned long) opc & VPN2_MASK)
+                                                     |
+                                                     ASID_MASK(kvm_read_c0_guest_entryhi(cop0)));
+                       if (index < 0) {
+                               kvm_err
+                                   ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
+                                    __func__, opc, vcpu, read_c0_entryhi());
+                               kvm_mips_dump_host_tlbs();
+                               local_irq_restore(flags);
+                               return KVM_INVALID_INST;
+                       }
+                       kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
+                                                            &vcpu->arch.
+                                                            guest_tlb[index],
+                                                            NULL, NULL);
+                       inst = *(opc);
+               }
+               local_irq_restore(flags);
+       } else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
+               paddr =
+                   kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
+                                                        (unsigned long) opc);
+               inst = *(uint32_t *) CKSEG0ADDR(paddr);
+       } else {
+               kvm_err("%s: illegal address: %p\n", __func__, opc);
+               return KVM_INVALID_INST;
+       }
+
+       return inst;
+}
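
kvm_get_inst() is the fetch primitive the emulator uses to re-read the faulting guest instruction. A caller would be expected to look roughly like the sketch below (the real call sites live in the emulation code, outside this hunk; the function name here is illustrative):

static enum emulation_result fetch_and_decode_sketch(struct kvm_vcpu *vcpu)
{
        uint32_t inst = kvm_get_inst((uint32_t *) vcpu->arch.pc, vcpu);

        if (inst == KVM_INVALID_INST)
                return EMULATE_FAIL;

        /* decode 'inst' and dispatch to the relevant emulation helper ... */
        return EMULATE_DONE;
}
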
+
+EXPORT_SYMBOL(kvm_local_flush_tlb_all);
+EXPORT_SYMBOL(kvm_shadow_tlb_put);
+EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
+EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
+EXPORT_SYMBOL(kvm_mips_init_shadow_tlb);
+EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
+EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
+EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
+EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
+EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
+EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
+EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
+EXPORT_SYMBOL(kvm_shadow_tlb_load);
+EXPORT_SYMBOL(kvm_mips_dump_shadow_tlbs);
+EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
+EXPORT_SYMBOL(kvm_get_inst);
+EXPORT_SYMBOL(kvm_arch_vcpu_load);
+EXPORT_SYMBOL(kvm_arch_vcpu_put);
diff --git a/arch/mips/kvm/kvm_trap_emul.c b/arch/mips/kvm/kvm_trap_emul.c
new file mode 100644 (file)
index 0000000..466aeef
--- /dev/null
@@ -0,0 +1,482 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+
+#include <linux/kvm_host.h>
+
+#include "kvm_mips_opcode.h"
+#include "kvm_mips_int.h"
+
+static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
+{
+       gpa_t gpa;
+       uint32_t kseg = KSEGX(gva);
+
+       if ((kseg == CKSEG0) || (kseg == CKSEG1))
+               gpa = CPHYSADDR(gva);
+       else {
+               printk("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
+               kvm_mips_dump_host_tlbs();
+               gpa = KVM_INVALID_ADDR;
+       }
+
+#ifdef DEBUG
+       kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);
+#endif
+
+       return gpa;
+}
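
For unmapped guest addresses this callback is a fixed-window translation: CPHYSADDR() simply strips the segment bits. A worked example, assuming the usual 32-bit segment layout:

        /* CKSEG0 (cached) and CKSEG1 (uncached) alias the low 512 MB of
         * physical memory, so for example:
         *
         *   CPHYSADDR(0x80001000) == 0x00001000       (CKSEG0)
         *   CPHYSADDR(0xa0001000) == 0x00001000       (CKSEG1)
         *
         * Anything else (kuseg/kseg2) is guest-TLB mapped and is rejected
         * here with KVM_INVALID_ADDR.
         */
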
+
+
+static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
+               er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
+       } else
+               er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+
+       switch (er) {
+       case EMULATE_DONE:
+               ret = RESUME_GUEST;
+               break;
+
+       case EMULATE_FAIL:
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+               break;
+
+       case EMULATE_WAIT:
+               run->exit_reason = KVM_EXIT_INTR;
+               ret = RESUME_HOST;
+               break;
+
+       default:
+               BUG();
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
+           || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
+#ifdef DEBUG
+               kvm_debug
+                   ("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                    cause, opc, badvaddr);
+#endif
+               er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);
+
+               if (er == EMULATE_DONE)
+                       ret = RESUME_GUEST;
+               else {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
+               /* XXXKYMA: The guest kernel does not expect to get this fault when we are not
+                * using HIGHMEM. Need to address this in a HIGHMEM kernel
+                */
+               printk
+                   ("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                    cause, opc, badvaddr);
+               kvm_mips_dump_host_tlbs();
+               kvm_arch_vcpu_dump_regs(vcpu);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       } else {
+               printk
+                   ("Illegal TLB Mod fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                    cause, opc, badvaddr);
+               kvm_mips_dump_host_tlbs();
+               kvm_arch_vcpu_dump_regs(vcpu);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
+           && KVM_GUEST_KERNEL_MODE(vcpu)) {
+               if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
+                  || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
+#ifdef DEBUG
+               kvm_debug
+                   ("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                    cause, opc, badvaddr);
+#endif
+               er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
+               if (er == EMULATE_DONE)
+                       ret = RESUME_GUEST;
+               else {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
+               /* All KSEG0 faults are handled by KVM, as the guest kernel does not
+                * expect to ever get them
+                */
+               if (kvm_mips_handle_kseg0_tlb_fault
+                   (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else {
+               kvm_err
+                   ("Illegal TLB LD fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                    cause, opc, badvaddr);
+               kvm_mips_dump_host_tlbs();
+               kvm_arch_vcpu_dump_regs(vcpu);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
+           && KVM_GUEST_KERNEL_MODE(vcpu)) {
+               if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
+                  || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
+#ifdef DEBUG
+               kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n",
+                         vcpu->arch.pc, badvaddr);
+#endif
+
+               /* User Address (UA) fault. This can happen if:
+                * (1) The TLB entry is not present/valid in either the Guest or the
+                *     shadow host TLB; in this case we pass the fault on to the
+                *     guest kernel and let it handle it.
+                * (2) The TLB entry is present in the Guest TLB but not in the shadow
+                *     host TLB; in this case we inject the entry from the Guest TLB
+                *     into the shadow host TLB.
+                */
+
+               er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
+               if (er == EMULATE_DONE)
+                       ret = RESUME_GUEST;
+               else {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
+               if (kvm_mips_handle_kseg0_tlb_fault
+                   (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else {
+               printk
+                   ("Illegal TLB ST fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                    cause, opc, badvaddr);
+               kvm_mips_dump_host_tlbs();
+               kvm_arch_vcpu_dump_regs(vcpu);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       if (KVM_GUEST_KERNEL_MODE(vcpu)
+           && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
+#ifdef DEBUG
+               kvm_debug("Emulate Store to MMIO space\n");
+#endif
+               er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+               if (er == EMULATE_FAIL) {
+                       printk("Emulate Store to MMIO space failed\n");
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               } else {
+                       run->exit_reason = KVM_EXIT_MMIO;
+                       ret = RESUME_HOST;
+               }
+       } else {
+               printk
+                   ("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                    cause, opc, badvaddr);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
+#ifdef DEBUG
+               kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
+#endif
+               er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+               if (er == EMULATE_FAIL) {
+                       printk("Emulate Load from MMIO space failed\n");
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               } else {
+                       run->exit_reason = KVM_EXIT_MMIO;
+                       ret = RESUME_HOST;
+               }
+       } else {
+               printk
+                   ("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                    cause, opc, badvaddr);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+               er = EMULATE_FAIL;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
+       if (er == EMULATE_DONE)
+               ret = RESUME_GUEST;
+       else {
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       er = kvm_mips_handle_ri(cause, opc, run, vcpu);
+       if (er == EMULATE_DONE)
+               ret = RESUME_GUEST;
+       else {
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
+       if (er == EMULATE_DONE)
+               ret = RESUME_GUEST;
+       else {
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int
+kvm_trap_emul_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+       kvm_write_c0_guest_index(cop0, regs->cp0reg[MIPS_CP0_TLB_INDEX][0]);
+       kvm_write_c0_guest_context(cop0, regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0]);
+       kvm_write_c0_guest_badvaddr(cop0, regs->cp0reg[MIPS_CP0_BAD_VADDR][0]);
+       kvm_write_c0_guest_entryhi(cop0, regs->cp0reg[MIPS_CP0_TLB_HI][0]);
+       kvm_write_c0_guest_epc(cop0, regs->cp0reg[MIPS_CP0_EXC_PC][0]);
+
+       kvm_write_c0_guest_status(cop0, regs->cp0reg[MIPS_CP0_STATUS][0]);
+       kvm_write_c0_guest_cause(cop0, regs->cp0reg[MIPS_CP0_CAUSE][0]);
+       kvm_write_c0_guest_pagemask(cop0,
+                                   regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0]);
+       kvm_write_c0_guest_wired(cop0, regs->cp0reg[MIPS_CP0_TLB_WIRED][0]);
+       kvm_write_c0_guest_errorepc(cop0, regs->cp0reg[MIPS_CP0_ERROR_PC][0]);
+
+       return 0;
+}
+
+static int
+kvm_trap_emul_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+       regs->cp0reg[MIPS_CP0_TLB_INDEX][0] = kvm_read_c0_guest_index(cop0);
+       regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0] = kvm_read_c0_guest_context(cop0);
+       regs->cp0reg[MIPS_CP0_BAD_VADDR][0] = kvm_read_c0_guest_badvaddr(cop0);
+       regs->cp0reg[MIPS_CP0_TLB_HI][0] = kvm_read_c0_guest_entryhi(cop0);
+       regs->cp0reg[MIPS_CP0_EXC_PC][0] = kvm_read_c0_guest_epc(cop0);
+
+       regs->cp0reg[MIPS_CP0_STATUS][0] = kvm_read_c0_guest_status(cop0);
+       regs->cp0reg[MIPS_CP0_CAUSE][0] = kvm_read_c0_guest_cause(cop0);
+       regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0] =
+           kvm_read_c0_guest_pagemask(cop0);
+       regs->cp0reg[MIPS_CP0_TLB_WIRED][0] = kvm_read_c0_guest_wired(cop0);
+       regs->cp0reg[MIPS_CP0_ERROR_PC][0] = kvm_read_c0_guest_errorepc(cop0);
+
+       regs->cp0reg[MIPS_CP0_CONFIG][0] = kvm_read_c0_guest_config(cop0);
+       regs->cp0reg[MIPS_CP0_CONFIG][1] = kvm_read_c0_guest_config1(cop0);
+       regs->cp0reg[MIPS_CP0_CONFIG][2] = kvm_read_c0_guest_config2(cop0);
+       regs->cp0reg[MIPS_CP0_CONFIG][3] = kvm_read_c0_guest_config3(cop0);
+       regs->cp0reg[MIPS_CP0_CONFIG][7] = kvm_read_c0_guest_config7(cop0);
+
+       return 0;
+}
+
+static int kvm_trap_emul_vm_init(struct kvm *kvm)
+{
+       return 0;
+}
+
+static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
+{
+       return 0;
+}
+
+static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       uint32_t config1;
+       int vcpu_id = vcpu->vcpu_id;
+
+       /* Arch specific stuff: set up the config registers so that the guest
+        * comes up as expected. For now we simulate a MIPS 24Kc.
+        */
+       kvm_write_c0_guest_prid(cop0, 0x00019300);
+       kvm_write_c0_guest_config(cop0,
+                                 MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
+                                 (MMU_TYPE_R4000 << CP0C0_MT));
+
+       /* Read the cache characteristics from the host Config1 Register */
+       config1 = (read_c0_config1() & ~0x7f);
+
+       /* Set up MMU size */
+       config1 &= ~(0x3f << 25);
+       config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);
+
+       /* We unset some bits that we aren't emulating */
+       config1 &=
+           ~((1 << CP0C1_C2) | (1 << CP0C1_MD) | (1 << CP0C1_PC) |
+             (1 << CP0C1_WR) | (1 << CP0C1_CA));
+       kvm_write_c0_guest_config1(cop0, config1);
+
+       kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2);
+       /* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */
+       kvm_write_c0_guest_config3(cop0,
+                                  MIPS_CONFIG3 | (0 << CP0C3_VInt) | (1 <<
+                                                                      CP0C3_ULRI));
+
+       /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
+       kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
+
+       /* Set up IntCtl defaults, compatibility mode for timer interrupts (HW5) */
+       kvm_write_c0_guest_intctl(cop0, 0xFC000000);
+
+       /* Put the vcpu id into the Ebase CPUNum field to handle SMP guests */
+       kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | (vcpu_id & 0xFF));
+
+       return 0;
+}
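
The Config1 manipulation above packs the guest TLB size into the architected MMUSize-1 field (Config1 bits 30:25). A sketch with illustrative macro names, assuming the series' KVM_MIPS_GUEST_TLB_SIZE of 64 entries:

#define CFG1_MMUSIZE_SHIFT      25
#define CFG1_MMUSIZE_MASK       (0x3f << CFG1_MMUSIZE_SHIFT)

static uint32_t set_guest_mmu_size(uint32_t config1, unsigned int entries)
{
        config1 &= ~CFG1_MMUSIZE_MASK;
        config1 |= (entries - 1) << CFG1_MMUSIZE_SHIFT; /* field holds size-1 */
        return config1;                                 /* 64 entries -> field value 63 */
}
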
+
+static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
+       /* exit handlers */
+       .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
+       .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
+       .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
+       .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
+       .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
+       .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
+       .handle_syscall = kvm_trap_emul_handle_syscall,
+       .handle_res_inst = kvm_trap_emul_handle_res_inst,
+       .handle_break = kvm_trap_emul_handle_break,
+
+       .vm_init = kvm_trap_emul_vm_init,
+       .vcpu_init = kvm_trap_emul_vcpu_init,
+       .vcpu_setup = kvm_trap_emul_vcpu_setup,
+       .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
+       .queue_timer_int = kvm_mips_queue_timer_int_cb,
+       .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
+       .queue_io_int = kvm_mips_queue_io_int_cb,
+       .dequeue_io_int = kvm_mips_dequeue_io_int_cb,
+       .irq_deliver = kvm_mips_irq_deliver_cb,
+       .irq_clear = kvm_mips_irq_clear_cb,
+       .vcpu_ioctl_get_regs = kvm_trap_emul_ioctl_get_regs,
+       .vcpu_ioctl_set_regs = kvm_trap_emul_ioctl_set_regs,
+};
+
+int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
+{
+       *install_callbacks = &kvm_trap_emul_callbacks;
+       return 0;
+}
diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h
new file mode 100644 (file)
index 0000000..bc9e0f4
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License.  See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KVM_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace
+
+/*
+ * Tracepoints for VM exits
+ */
+extern char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES];
+
+TRACE_EVENT(kvm_exit,
+           TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
+           TP_ARGS(vcpu, reason),
+           TP_STRUCT__entry(
+                       __field(struct kvm_vcpu *, vcpu)
+                       __field(unsigned int, reason)
+           ),
+
+           TP_fast_assign(
+                       __entry->vcpu = vcpu;
+                       __entry->reason = reason;
+           ),
+
+           TP_printk("[%s]PC: 0x%08lx",
+                     kvm_mips_exit_types_str[__entry->reason],
+                     __entry->vcpu->arch.pc)
+);
+
+#endif /* _TRACE_KVM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
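
The TRACE_EVENT above expands to a trace_kvm_exit() function that the exit path is expected to call once it has classified the exit. A hypothetical call site (the real one lives in the generic KVM/MIPS exit handler, outside this hunk; 'exit_reason' is an assumed local):

        /* 'exit_reason' holds one of the MAX_KVM_MIPS_EXIT_TYPES values used
         * to index kvm_mips_exit_types_str[] in the printed trace record.
         */
        trace_kvm_exit(vcpu, exit_reason);
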
index a64daee740ee0414ee6a8db474b32893ae51ab1d..3b2a1e78a54369fc43bf052288f3fc7acaac5ae4 100644 (file)
@@ -19,7 +19,7 @@
  */
 void __mips_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
-       volatile unsigned long *a = addr;
+       unsigned long *a = (unsigned long *)addr;
        unsigned bit = nr & SZLONG_MASK;
        unsigned long mask;
        unsigned long flags;
@@ -41,7 +41,7 @@ EXPORT_SYMBOL(__mips_set_bit);
  */
 void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
-       volatile unsigned long *a = addr;
+       unsigned long *a = (unsigned long *)addr;
        unsigned bit = nr & SZLONG_MASK;
        unsigned long mask;
        unsigned long flags;
@@ -63,7 +63,7 @@ EXPORT_SYMBOL(__mips_clear_bit);
  */
 void __mips_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
-       volatile unsigned long *a = addr;
+       unsigned long *a = (unsigned long *)addr;
        unsigned bit = nr & SZLONG_MASK;
        unsigned long mask;
        unsigned long flags;
@@ -86,7 +86,7 @@ EXPORT_SYMBOL(__mips_change_bit);
 int __mips_test_and_set_bit(unsigned long nr,
                            volatile unsigned long *addr)
 {
-       volatile unsigned long *a = addr;
+       unsigned long *a = (unsigned long *)addr;
        unsigned bit = nr & SZLONG_MASK;
        unsigned long mask;
        unsigned long flags;
@@ -112,7 +112,7 @@ EXPORT_SYMBOL(__mips_test_and_set_bit);
 int __mips_test_and_set_bit_lock(unsigned long nr,
                                 volatile unsigned long *addr)
 {
-       volatile unsigned long *a = addr;
+       unsigned long *a = (unsigned long *)addr;
        unsigned bit = nr & SZLONG_MASK;
        unsigned long mask;
        unsigned long flags;
@@ -137,7 +137,7 @@ EXPORT_SYMBOL(__mips_test_and_set_bit_lock);
  */
 int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
-       volatile unsigned long *a = addr;
+       unsigned long *a = (unsigned long *)addr;
        unsigned bit = nr & SZLONG_MASK;
        unsigned long mask;
        unsigned long flags;
@@ -162,7 +162,7 @@ EXPORT_SYMBOL(__mips_test_and_clear_bit);
  */
 int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
-       volatile unsigned long *a = addr;
+       unsigned long *a = (unsigned long *)addr;
        unsigned bit = nr & SZLONG_MASK;
        unsigned long mask;
        unsigned long flags;
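
The bitops.c hunks all make the same change: the local alias of the user-visible volatile pointer loses its volatile qualifier, which is safe because each helper performs its read-modify-write with interrupts disabled. Reconstructed as a sketch from the visible context (the full bodies sit outside the hunks), the shape is:

void __mips_set_bit_sketch(unsigned long nr, volatile unsigned long *addr)
{
        unsigned long *a = (unsigned long *)addr;       /* drop volatile for the local alias */
        unsigned bit = nr & SZLONG_MASK;
        unsigned long mask = 1UL << bit;
        unsigned long flags;

        a += nr >> SZLONG_LOG;                          /* word that holds the bit */
        raw_local_irq_save(flags);                      /* whole RMW is irq-atomic */
        *a |= mask;
        raw_local_irq_restore(flags);
}
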
index 32b9f21bfd8562f37d8e51e1ad23908c320ad3e8..8a12d00908e024ab3559681955182f7f216c0155 100644 (file)
@@ -11,6 +11,7 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/tlbdebug.h>
+#include <asm/mmu_context.h>
 
 static inline const char *msk2str(unsigned int mask)
 {
@@ -55,7 +56,7 @@ static void dump_tlb(int first, int last)
        s_pagemask = read_c0_pagemask();
        s_entryhi = read_c0_entryhi();
        s_index = read_c0_index();
-       asid = s_entryhi & 0xff;
+       asid = ASID_MASK(s_entryhi);
 
        for (i = first; i <= last; i++) {
                write_c0_index(i);
@@ -85,7 +86,7 @@ static void dump_tlb(int first, int last)
 
                        printk("va=%0*lx asid=%02lx\n",
                               width, (entryhi & ~0x1fffUL),
-                              entryhi & 0xff);
+                              ASID_MASK(entryhi));
                        printk("\t[pa=%0*llx c=%d d=%d v=%d g=%d] ",
                               width,
                               (entrylo0 << 6) & PAGE_MASK, c0,
index 053d3b0b0317b18d848b2b2419025ebb1f23db31..0580194e7402aa6508c579e2ff925833fd421948 100644 (file)
@@ -5,7 +5,8 @@
  *
  * Copyright (C) 1998, 1999, 2000 by Ralf Baechle
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
- * Copyright (C) 2007  Maciej W. Rozycki
+ * Copyright (C) 2007 by Maciej W. Rozycki
+ * Copyright (C) 2011, 2012 MIPS Technologies, Inc.
  */
 #include <asm/asm.h>
 #include <asm/asm-offsets.h>
 #define LONG_S_R sdr
 #endif
 
+#ifdef CONFIG_CPU_MICROMIPS
+#define STORSIZE (LONGSIZE * 2)
+#define STORMASK (STORSIZE - 1)
+#define FILL64RG t8
+#define FILLPTRG t7
+#undef  LONG_S
+#define LONG_S LONG_SP
+#else
+#define STORSIZE LONGSIZE
+#define STORMASK LONGMASK
+#define FILL64RG a1
+#define FILLPTRG t0
+#endif
+
 #define EX(insn,reg,addr,handler)                      \
 9:     insn    reg, addr;                              \
        .section __ex_table,"a";                        \
        .previous
 
        .macro  f_fill64 dst, offset, val, fixup
-       EX(LONG_S, \val, (\offset +  0 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset +  1 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset +  2 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset +  3 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset +  4 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset +  5 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset +  6 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset +  7 * LONGSIZE)(\dst), \fixup)
-#if LONGSIZE == 4
-       EX(LONG_S, \val, (\offset +  8 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset +  9 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset + 10 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset + 11 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset + 12 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset + 13 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset + 14 * LONGSIZE)(\dst), \fixup)
-       EX(LONG_S, \val, (\offset + 15 * LONGSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset +  0 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset +  1 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset +  2 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset +  3 * STORSIZE)(\dst), \fixup)
+#if ((defined(CONFIG_CPU_MICROMIPS) && (LONGSIZE == 4)) || !defined(CONFIG_CPU_MICROMIPS))
+       EX(LONG_S, \val, (\offset +  4 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset +  5 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset +  6 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset +  7 * STORSIZE)(\dst), \fixup)
+#endif
+#if (!defined(CONFIG_CPU_MICROMIPS) && (LONGSIZE == 4))
+       EX(LONG_S, \val, (\offset +  8 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset +  9 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset + 10 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset + 11 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset + 12 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset + 13 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset + 14 * STORSIZE)(\dst), \fixup)
+       EX(LONG_S, \val, (\offset + 15 * STORSIZE)(\dst), \fixup)
 #endif
        .endm
 
@@ -71,16 +88,20 @@ LEAF(memset)
 1:
 
 FEXPORT(__bzero)
-       sltiu           t0, a2, LONGSIZE        /* very small region? */
+       sltiu           t0, a2, STORSIZE        /* very small region? */
        bnez            t0, .Lsmall_memset
-        andi           t0, a0, LONGMASK        /* aligned? */
+        andi           t0, a0, STORMASK        /* aligned? */
 
+#ifdef CONFIG_CPU_MICROMIPS
+       move            t8, a1                  /* used by 'swp' instruction */
+       move            t9, a1
+#endif
 #ifndef CONFIG_CPU_DADDI_WORKAROUNDS
        beqz            t0, 1f
-        PTR_SUBU       t0, LONGSIZE            /* alignment in bytes */
+        PTR_SUBU       t0, STORSIZE            /* alignment in bytes */
 #else
        .set            noat
-       li              AT, LONGSIZE
+       li              AT, STORSIZE
        beqz            t0, 1f
         PTR_SUBU       t0, AT                  /* alignment in bytes */
        .set            at
@@ -99,24 +120,27 @@ FEXPORT(__bzero)
 1:     ori             t1, a2, 0x3f            /* # of full blocks */
        xori            t1, 0x3f
        beqz            t1, .Lmemset_partial    /* no block to fill */
-        andi           t0, a2, 0x40-LONGSIZE
+        andi           t0, a2, 0x40-STORSIZE
 
        PTR_ADDU        t1, a0                  /* end address */
        .set            reorder
 1:     PTR_ADDIU       a0, 64
        R10KCBARRIER(0(ra))
-       f_fill64 a0, -64, a1, .Lfwd_fixup
+       f_fill64 a0, -64, FILL64RG, .Lfwd_fixup
        bne             t1, a0, 1b
        .set            noreorder
 
 .Lmemset_partial:
        R10KCBARRIER(0(ra))
        PTR_LA          t1, 2f                  /* where to start */
+#ifdef CONFIG_CPU_MICROMIPS
+       LONG_SRL        t7, t0, 1
+#endif
 #if LONGSIZE == 4
-       PTR_SUBU        t1, t0
+       PTR_SUBU        t1, FILLPTRG
 #else
        .set            noat
-       LONG_SRL                AT, t0, 1
+       LONG_SRL        AT, FILLPTRG, 1
        PTR_SUBU        t1, AT
        .set            at
 #endif
@@ -126,9 +150,9 @@ FEXPORT(__bzero)
        .set            push
        .set            noreorder
        .set            nomacro
-       f_fill64 a0, -64, a1, .Lpartial_fixup   /* ... but first do longs ... */
+       f_fill64 a0, -64, FILL64RG, .Lpartial_fixup     /* ... but first do longs ... */
 2:     .set            pop
-       andi            a2, LONGMASK            /* At most one long to go */
+       andi            a2, STORMASK            /* At most one long to go */
 
        beqz            a2, 1f
         PTR_ADDU       a0, a2                  /* What's left */
@@ -169,7 +193,7 @@ FEXPORT(__bzero)
 
 .Lpartial_fixup:
        PTR_L           t0, TI_TASK($28)
-       andi            a2, LONGMASK
+       andi            a2, STORMASK
        LONG_L          t0, THREAD_BUADDR(t0)
        LONG_ADDU       a2, t1
        jr              ra
@@ -177,4 +201,4 @@ FEXPORT(__bzero)
 
 .Llast_fixup:
        jr              ra
-        andi           v1, a2, LONGMASK
+        andi           v1, a2, STORMASK
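
The new STORSIZE/STORMASK pair generalize the fill loop's store width so the microMIPS build can use its paired-store instruction. A worked example of the arithmetic, assuming a 32-bit (LONGSIZE == 4) microMIPS kernel:

/*
 *   STORSIZE = LONGSIZE * 2 = 8     bytes per store ('swp' writes a pair)
 *   STORMASK = STORSIZE - 1 = 0x7   alignment mask tested before the loop
 *
 * so f_fill64 needs only 64 / 8 = 8 stores per 64-byte block, versus
 * 16 four-byte stores in the plain MIPS32 build (and 8 eight-byte stores
 * on 64-bit, where STORSIZE stays LONGSIZE = 8).
 */
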
index cd160be3ce4dc97b934b566b4c69197ceb07b342..6807f7172eaf46f2841301d64882484f11be861f 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/compiler.h>
 #include <linux/preempt.h>
 #include <linux/export.h>
+#include <linux/stringify.h>
 
 #if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC)
 
  *
  * Workaround: mask EXL bit of the result or place a nop before mfc0.
  */
-__asm__(
-       "       .macro  arch_local_irq_disable\n"
+notrace void arch_local_irq_disable(void)
+{
+       preempt_disable();
+
+       __asm__ __volatile__(
        "       .set    push                                            \n"
        "       .set    noat                                            \n"
 #ifdef CONFIG_MIPS_MT_SMTC
@@ -52,108 +56,98 @@ __asm__(
        "       .set    noreorder                                       \n"
        "       mtc0    $1,$12                                          \n"
 #endif
-       "       irq_disable_hazard                                      \n"
+       "       " __stringify(__irq_disable_hazard) "                   \n"
        "       .set    pop                                             \n"
-       "       .endm                                                   \n");
+       : /* no outputs */
+       : /* no inputs */
+       : "memory");
 
-notrace void arch_local_irq_disable(void)
-{
-       preempt_disable();
-       __asm__ __volatile__(
-               "arch_local_irq_disable"
-               : /* no outputs */
-               : /* no inputs */
-               : "memory");
        preempt_enable();
 }
 EXPORT_SYMBOL(arch_local_irq_disable);
 
 
-__asm__(
-       "       .macro  arch_local_irq_save result                      \n"
+notrace unsigned long arch_local_irq_save(void)
+{
+       unsigned long flags;
+
+       preempt_disable();
+
+       __asm__ __volatile__(
        "       .set    push                                            \n"
        "       .set    reorder                                         \n"
        "       .set    noat                                            \n"
 #ifdef CONFIG_MIPS_MT_SMTC
-       "       mfc0    \\result, $2, 1                                 \n"
-       "       ori     $1, \\result, 0x400                             \n"
+       "       mfc0    %[flags], $2, 1                         \n"
+       "       ori     $1, %[flags], 0x400                             \n"
        "       .set    noreorder                                       \n"
        "       mtc0    $1, $2, 1                                       \n"
-       "       andi    \\result, \\result, 0x400                       \n"
+       "       andi    %[flags], %[flags], 0x400                       \n"
 #elif defined(CONFIG_CPU_MIPSR2)
        /* see irqflags.h for inline function */
 #else
-       "       mfc0    \\result, $12                                   \n"
-       "       ori     $1, \\result, 0x1f                              \n"
+       "       mfc0    %[flags], $12                                   \n"
+       "       ori     $1, %[flags], 0x1f                              \n"
        "       xori    $1, 0x1f                                        \n"
        "       .set    noreorder                                       \n"
        "       mtc0    $1, $12                                         \n"
 #endif
-       "       irq_disable_hazard                                      \n"
+       "       " __stringify(__irq_disable_hazard) "                   \n"
        "       .set    pop                                             \n"
-       "       .endm                                                   \n");
+       : [flags] "=r" (flags)
+       : /* no inputs */
+       : "memory");
 
-notrace unsigned long arch_local_irq_save(void)
-{
-       unsigned long flags;
-       preempt_disable();
-       asm volatile("arch_local_irq_save\t%0"
-                    : "=r" (flags)
-                    : /* no inputs */
-                    : "memory");
        preempt_enable();
+
        return flags;
 }
 EXPORT_SYMBOL(arch_local_irq_save);
 
+notrace void arch_local_irq_restore(unsigned long flags)
+{
+       unsigned long __tmp1;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+       /*
+        * SMTC kernel needs to do a software replay of queued
+        * IPIs, at the cost of branch and call overhead on each
+        * local_irq_restore()
+        */
+       if (unlikely(!(flags & 0x0400)))
+               smtc_ipi_replay();
+#endif
+       preempt_disable();
 
-__asm__(
-       "       .macro  arch_local_irq_restore flags                    \n"
+       __asm__ __volatile__(
        "       .set    push                                            \n"
        "       .set    noreorder                                       \n"
        "       .set    noat                                            \n"
 #ifdef CONFIG_MIPS_MT_SMTC
-       "mfc0   $1, $2, 1                                               \n"
-       "andi   \\flags, 0x400                                          \n"
-       "ori    $1, 0x400                                               \n"
-       "xori   $1, 0x400                                               \n"
-       "or     \\flags, $1                                             \n"
-       "mtc0   \\flags, $2, 1                                          \n"
+       "       mfc0    $1, $2, 1                                       \n"
+       "       andi    %[flags], 0x400                                 \n"
+       "       ori     $1, 0x400                                       \n"
+       "       xori    $1, 0x400                                       \n"
+       "       or      %[flags], $1                                    \n"
+       "       mtc0    %[flags], $2, 1                                 \n"
 #elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
        /* see irqflags.h for inline function */
 #elif defined(CONFIG_CPU_MIPSR2)
        /* see irqflags.h for inline function */
 #else
        "       mfc0    $1, $12                                         \n"
-       "       andi    \\flags, 1                                      \n"
+       "       andi    %[flags], 1                                     \n"
        "       ori     $1, 0x1f                                        \n"
        "       xori    $1, 0x1f                                        \n"
-       "       or      \\flags, $1                                     \n"
-       "       mtc0    \\flags, $12                                    \n"
+       "       or      %[flags], $1                                    \n"
+       "       mtc0    %[flags], $12                                   \n"
 #endif
-       "       irq_disable_hazard                                      \n"
+       "       " __stringify(__irq_disable_hazard) "                   \n"
        "       .set    pop                                             \n"
-       "       .endm                                                   \n");
+       : [flags] "=r" (__tmp1)
+       : "0" (flags)
+       : "memory");
 
-notrace void arch_local_irq_restore(unsigned long flags)
-{
-       unsigned long __tmp1;
-
-#ifdef CONFIG_MIPS_MT_SMTC
-       /*
-        * SMTC kernel needs to do a software replay of queued
-        * IPIs, at the cost of branch and call overhead on each
-        * local_irq_restore()
-        */
-       if (unlikely(!(flags & 0x0400)))
-               smtc_ipi_replay();
-#endif
-       preempt_disable();
-       __asm__ __volatile__(
-               "arch_local_irq_restore\t%0"
-               : "=r" (__tmp1)
-               : "0" (flags)
-               : "memory");
        preempt_enable();
 }
 EXPORT_SYMBOL(arch_local_irq_restore);
@@ -164,11 +158,36 @@ notrace void __arch_local_irq_restore(unsigned long flags)
        unsigned long __tmp1;
 
        preempt_disable();
+
        __asm__ __volatile__(
-               "arch_local_irq_restore\t%0"
-               : "=r" (__tmp1)
-               : "0" (flags)
-               : "memory");
+       "       .set    push                                            \n"
+       "       .set    noreorder                                       \n"
+       "       .set    noat                                            \n"
+#ifdef CONFIG_MIPS_MT_SMTC
+       "       mfc0    $1, $2, 1                                       \n"
+       "       andi    %[flags], 0x400                                 \n"
+       "       ori     $1, 0x400                                       \n"
+       "       xori    $1, 0x400                                       \n"
+       "       or      %[flags], $1                                    \n"
+       "       mtc0    %[flags], $2, 1                                 \n"
+#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+       /* see irqflags.h for inline function */
+#elif defined(CONFIG_CPU_MIPSR2)
+       /* see irqflags.h for inline function */
+#else
+       "       mfc0    $1, $12                                         \n"
+       "       andi    %[flags], 1                                     \n"
+       "       ori     $1, 0x1f                                        \n"
+       "       xori    $1, 0x1f                                        \n"
+       "       or      %[flags], $1                                    \n"
+       "       mtc0    %[flags], $12                                   \n"
+#endif
+       "       " __stringify(__irq_disable_hazard) "                   \n"
+       "       .set    pop                                             \n"
+       : [flags] "=r" (__tmp1)
+       : "0" (flags)
+       : "memory");
+
        preempt_enable();
 }
 EXPORT_SYMBOL(__arch_local_irq_restore);
index 91615c2ef0cf969baeff215ca3d8a627e3851d2f..8327698b99377e0e78c74a7bf0bef19e657fbd53 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/mm.h>
 
 #include <asm/mipsregs.h>
+#include <asm/mmu_context.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/tlbdebug.h>
@@ -21,7 +22,7 @@ static void dump_tlb(int first, int last)
        unsigned int asid;
        unsigned long entryhi, entrylo0;
 
-       asid = read_c0_entryhi() & 0xfc0;
+       asid = ASID_MASK(read_c0_entryhi());
 
        for (i = first; i <= last; i++) {
                write_c0_index(i<<8);
@@ -35,7 +36,7 @@ static void dump_tlb(int first, int last)
 
                /* Unused entries have a virtual address of KSEG0.  */
                if ((entryhi & 0xffffe000) != 0x80000000
-                   && (entryhi & 0xfc0) == asid) {
+                   && (ASID_MASK(entryhi) == asid)) {
                        /*
                         * Only print entries in use
                         */
@@ -44,7 +45,7 @@ static void dump_tlb(int first, int last)
                        printk("va=%08lx asid=%08lx"
                               "  [pa=%06lx n=%d d=%d v=%d g=%d]",
                               (entryhi & 0xffffe000),
-                              entryhi & 0xfc0,
+                              ASID_MASK(entryhi),
                               entrylo0 & PAGE_MASK,
                               (entrylo0 & (1 << 11)) ? 1 : 0,
                               (entrylo0 & (1 << 10)) ? 1 : 0,
index fdbb970f670d84ed2bafcbd42cc4e9eb46b96156..e362dcdc69d1617486ee5627f055ffc3075b5548 100644 (file)
@@ -3,8 +3,9 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 1996, 1998, 1999, 2004 by Ralf Baechle
- * Copyright (c) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 1996, 1998, 1999, 2004 by Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 2011 MIPS Technologies, Inc.
  */
 #include <asm/asm.h>
 #include <asm/asm-offsets.h>
@@ -28,9 +29,9 @@ LEAF(__strlen_user_asm)
 
 FEXPORT(__strlen_user_nocheck_asm)
        move            v0, a0
-1:     EX(lb, t0, (v0), .Lfault)
+1:     EX(lbu, v1, (v0), .Lfault)
        PTR_ADDIU       v0, 1
-       bnez            t0, 1b
+       bnez            v1, 1b
        PTR_SUBU        v0, a0
        jr              ra
        END(__strlen_user_asm)
index bad5394875031ecb08e668a53b5bbca70649de95..92870b6b53eaeee044a0424ef1cdd16870ec445a 100644 (file)
@@ -3,7 +3,8 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 1996, 1999 by Ralf Baechle
+ * Copyright (C) 1996, 1999 by Ralf Baechle
+ * Copyright (C) 2011 MIPS Technologies, Inc.
  */
 #include <linux/errno.h>
 #include <asm/asm.h>
@@ -33,26 +34,27 @@ LEAF(__strncpy_from_user_asm)
        bnez            v0, .Lfault
 
 FEXPORT(__strncpy_from_user_nocheck_asm)
-       move            v0, zero
-       move            v1, a1
        .set            noreorder
-1:     EX(lbu, t0, (v1), .Lfault)
+       move            t0, zero
+       move            v1, a1
+1:     EX(lbu, v0, (v1), .Lfault)
        PTR_ADDIU       v1, 1
        R10KCBARRIER(0(ra))
-       beqz            t0, 2f
-        sb             t0, (a0)
-       PTR_ADDIU       v0, 1
-       .set            reorder
-       PTR_ADDIU       a0, 1
-       bne             v0, a2, 1b
-2:     PTR_ADDU        t0, a1, v0
-       xor             t0, a1
-       bltz            t0, .Lfault
+       beqz            v0, 2f
+        sb             v0, (a0)
+       PTR_ADDIU       t0, 1
+       bne             t0, a2, 1b
+        PTR_ADDIU      a0, 1
+2:     PTR_ADDU        v0, a1, t0
+       xor             v0, a1
+       bltz            v0, .Lfault
+        nop
        jr              ra                      # return n
+        move           v0, t0
        END(__strncpy_from_user_asm)
 
-.Lfault:       li              v0, -EFAULT
-       jr              ra
+.Lfault: jr            ra
+         li            v0, -EFAULT
 
        .section        __ex_table,"a"
        PTR             1b, .Lfault
index beea03c8c0ce823130ab25b421f6874b5a2a13b8..fcacea5e61f1e685891e8e4af7ec8f701c596fd2 100644 (file)
@@ -35,7 +35,7 @@ FEXPORT(__strnlen_user_nocheck_asm)
        PTR_ADDU        a1, a0                  # stop pointer
 1:     beq             v0, a1, 1f              # limit reached?
        EX(lb, t0, (v0), .Lfault)
-       PTR_ADD       v0, 1
+       PTR_ADDIU       v0, 1
        bnez            t0, 1b
 1:     PTR_SUBU        v0, a0
        jr              ra
index afb5a0bcf7a5a524c4fe75450be559e8402c68d4..f03771900813cb69c9ddd5f579841350a8c48786 100644 (file)
@@ -45,6 +45,7 @@
 #include <asm/signal.h>
 #include <asm/mipsregs.h>
 #include <asm/fpu_emulator.h>
+#include <asm/fpu.h>
 #include <asm/uaccess.h>
 #include <asm/branch.h>
 
@@ -81,6 +82,11 @@ DEFINE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats);
 /* Determine rounding mode from the RM bits of the FCSR */
 #define modeindex(v) ((v) & FPU_CSR_RM)
 
+/* microMIPS bitfields */
+#define MM_POOL32A_MINOR_MASK  0x3f
+#define MM_POOL32A_MINOR_SHIFT 0x6
+#define MM_MIPS32_COND_FC      0x30
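+/* MIPS32 c.cond.fmt encodes its function field as FC|cond, i.e. 0x30 | cond. */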
+
 /* Convert Mips rounding mode (0..3) to IEEE library modes. */
 static const unsigned char ieee_rm[4] = {
        [FPU_CSR_RN] = IEEE754_RN,
@@ -110,6 +116,556 @@ static const unsigned int fpucondbit[8] = {
 };
 #endif
 
+/* (microMIPS) Convert 16-bit register encoding to 32-bit register encoding. */
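+/* Encodings 0-7 select $16, $17, $2-$7, i.e. s0, s1, v0, v1 and a0-a3. */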
+static const unsigned int reg16to32map[8] = {16, 17, 2, 3, 4, 5, 6, 7};
+
+/* (microMIPS) Convert certain microMIPS instructions to MIPS32 format. */
+static const int sd_format[] = {16, 17, 0, 0, 0, 0, 0, 0};
+static const int sdps_format[] = {16, 17, 22, 0, 0, 0, 0, 0};
+static const int dwl_format[] = {17, 20, 21, 0, 0, 0, 0, 0};
+static const int swl_format[] = {16, 20, 21, 0, 0, 0, 0, 0};
+
+/*
+ * This function translates a 32-bit microMIPS instruction
+ * into a 32-bit MIPS32 instruction. Returns 0 on success
+ * and SIGILL otherwise.
+ */
+static int microMIPS32_to_MIPS32(union mips_instruction *insn_ptr)
+{
+       union mips_instruction insn = *insn_ptr;
+       union mips_instruction mips32_insn = insn;
+       int func, fmt, op;
+
+       switch (insn.mm_i_format.opcode) {
+       case mm_ldc132_op:
+               mips32_insn.mm_i_format.opcode = ldc1_op;
+               mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
+               mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
+               break;
+       case mm_lwc132_op:
+               mips32_insn.mm_i_format.opcode = lwc1_op;
+               mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
+               mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
+               break;
+       case mm_sdc132_op:
+               mips32_insn.mm_i_format.opcode = sdc1_op;
+               mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
+               mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
+               break;
+       case mm_swc132_op:
+               mips32_insn.mm_i_format.opcode = swc1_op;
+               mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
+               mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
+               break;
+       case mm_pool32i_op:
+               /* NOTE: offset is << by 1 if in microMIPS mode. */
+               if ((insn.mm_i_format.rt == mm_bc1f_op) ||
+                   (insn.mm_i_format.rt == mm_bc1t_op)) {
+                       mips32_insn.fb_format.opcode = cop1_op;
+                       mips32_insn.fb_format.bc = bc_op;
+                       mips32_insn.fb_format.flag =
+                               (insn.mm_i_format.rt == mm_bc1t_op) ? 1 : 0;
+               } else
+                       return SIGILL;
+               break;
+       case mm_pool32f_op:
+               switch (insn.mm_fp0_format.func) {
+               case mm_32f_01_op:
+               case mm_32f_11_op:
+               case mm_32f_02_op:
+               case mm_32f_12_op:
+               case mm_32f_41_op:
+               case mm_32f_51_op:
+               case mm_32f_42_op:
+               case mm_32f_52_op:
+                       op = insn.mm_fp0_format.func;
+                       if (op == mm_32f_01_op)
+                               func = madd_s_op;
+                       else if (op == mm_32f_11_op)
+                               func = madd_d_op;
+                       else if (op == mm_32f_02_op)
+                               func = nmadd_s_op;
+                       else if (op == mm_32f_12_op)
+                               func = nmadd_d_op;
+                       else if (op == mm_32f_41_op)
+                               func = msub_s_op;
+                       else if (op == mm_32f_51_op)
+                               func = msub_d_op;
+                       else if (op == mm_32f_42_op)
+                               func = nmsub_s_op;
+                       else
+                               func = nmsub_d_op;
+                       mips32_insn.fp6_format.opcode = cop1x_op;
+                       mips32_insn.fp6_format.fr = insn.mm_fp6_format.fr;
+                       mips32_insn.fp6_format.ft = insn.mm_fp6_format.ft;
+                       mips32_insn.fp6_format.fs = insn.mm_fp6_format.fs;
+                       mips32_insn.fp6_format.fd = insn.mm_fp6_format.fd;
+                       mips32_insn.fp6_format.func = func;
+                       break;
+               case mm_32f_10_op:
+                       func = -1;      /* Invalid */
+                       op = insn.mm_fp5_format.op & 0x7;
+                       if (op == mm_ldxc1_op)
+                               func = ldxc1_op;
+                       else if (op == mm_sdxc1_op)
+                               func = sdxc1_op;
+                       else if (op == mm_lwxc1_op)
+                               func = lwxc1_op;
+                       else if (op == mm_swxc1_op)
+                               func = swxc1_op;
+
+                       if (func != -1) {
+                               mips32_insn.r_format.opcode = cop1x_op;
+                               mips32_insn.r_format.rs =
+                                       insn.mm_fp5_format.base;
+                               mips32_insn.r_format.rt =
+                                       insn.mm_fp5_format.index;
+                               mips32_insn.r_format.rd = 0;
+                               mips32_insn.r_format.re = insn.mm_fp5_format.fd;
+                               mips32_insn.r_format.func = func;
+                       } else
+                               return SIGILL;
+                       break;
+               case mm_32f_40_op:
+                       op = -1;        /* Invalid */
+                       if (insn.mm_fp2_format.op == mm_fmovt_op)
+                               op = 1;
+                       else if (insn.mm_fp2_format.op == mm_fmovf_op)
+                               op = 0;
+                       if (op != -1) {
+                               mips32_insn.fp0_format.opcode = cop1_op;
+                               mips32_insn.fp0_format.fmt =
+                                       sdps_format[insn.mm_fp2_format.fmt];
+                               mips32_insn.fp0_format.ft =
+                                       (insn.mm_fp2_format.cc<<2) + op;
+                               mips32_insn.fp0_format.fs =
+                                       insn.mm_fp2_format.fs;
+                               mips32_insn.fp0_format.fd =
+                                       insn.mm_fp2_format.fd;
+                               mips32_insn.fp0_format.func = fmovc_op;
+                       } else
+                               return SIGILL;
+                       break;
+               case mm_32f_60_op:
+                       func = -1;      /* Invalid */
+                       if (insn.mm_fp0_format.op == mm_fadd_op)
+                               func = fadd_op;
+                       else if (insn.mm_fp0_format.op == mm_fsub_op)
+                               func = fsub_op;
+                       else if (insn.mm_fp0_format.op == mm_fmul_op)
+                               func = fmul_op;
+                       else if (insn.mm_fp0_format.op == mm_fdiv_op)
+                               func = fdiv_op;
+                       if (func != -1) {
+                               mips32_insn.fp0_format.opcode = cop1_op;
+                               mips32_insn.fp0_format.fmt =
+                                       sdps_format[insn.mm_fp0_format.fmt];
+                               mips32_insn.fp0_format.ft =
+                                       insn.mm_fp0_format.ft;
+                               mips32_insn.fp0_format.fs =
+                                       insn.mm_fp0_format.fs;
+                               mips32_insn.fp0_format.fd =
+                                       insn.mm_fp0_format.fd;
+                               mips32_insn.fp0_format.func = func;
+                       } else
+                               return SIGILL;
+                       break;
+               case mm_32f_70_op:
+                       func = -1;      /* Invalid */
+                       if (insn.mm_fp0_format.op == mm_fmovn_op)
+                               func = fmovn_op;
+                       else if (insn.mm_fp0_format.op == mm_fmovz_op)
+                               func = fmovz_op;
+                       if (func != -1) {
+                               mips32_insn.fp0_format.opcode = cop1_op;
+                               mips32_insn.fp0_format.fmt =
+                                       sdps_format[insn.mm_fp0_format.fmt];
+                               mips32_insn.fp0_format.ft =
+                                       insn.mm_fp0_format.ft;
+                               mips32_insn.fp0_format.fs =
+                                       insn.mm_fp0_format.fs;
+                               mips32_insn.fp0_format.fd =
+                                       insn.mm_fp0_format.fd;
+                               mips32_insn.fp0_format.func = func;
+                       } else
+                               return SIGILL;
+                       break;
+               case mm_32f_73_op:    /* POOL32FXF */
+                       switch (insn.mm_fp1_format.op) {
+                       case mm_movf0_op:
+                       case mm_movf1_op:
+                       case mm_movt0_op:
+                       case mm_movt1_op:
+                               if ((insn.mm_fp1_format.op & 0x7f) ==
+                                   mm_movf0_op)
+                                       op = 0;
+                               else
+                                       op = 1;
+                               mips32_insn.r_format.opcode = spec_op;
+                               mips32_insn.r_format.rs = insn.mm_fp4_format.fs;
+                               mips32_insn.r_format.rt =
+                                       (insn.mm_fp4_format.cc << 2) + op;
+                               mips32_insn.r_format.rd = insn.mm_fp4_format.rt;
+                               mips32_insn.r_format.re = 0;
+                               mips32_insn.r_format.func = movc_op;
+                               break;
+                       case mm_fcvtd0_op:
+                       case mm_fcvtd1_op:
+                       case mm_fcvts0_op:
+                       case mm_fcvts1_op:
+                               if ((insn.mm_fp1_format.op & 0x7f) ==
+                                   mm_fcvtd0_op) {
+                                       func = fcvtd_op;
+                                       fmt = swl_format[insn.mm_fp3_format.fmt];
+                               } else {
+                                       func = fcvts_op;
+                                       fmt = dwl_format[insn.mm_fp3_format.fmt];
+                               }
+                               mips32_insn.fp0_format.opcode = cop1_op;
+                               mips32_insn.fp0_format.fmt = fmt;
+                               mips32_insn.fp0_format.ft = 0;
+                               mips32_insn.fp0_format.fs =
+                                       insn.mm_fp3_format.fs;
+                               mips32_insn.fp0_format.fd =
+                                       insn.mm_fp3_format.rt;
+                               mips32_insn.fp0_format.func = func;
+                               break;
+                       case mm_fmov0_op:
+                       case mm_fmov1_op:
+                       case mm_fabs0_op:
+                       case mm_fabs1_op:
+                       case mm_fneg0_op:
+                       case mm_fneg1_op:
+                               if ((insn.mm_fp1_format.op & 0x7f) ==
+                                   mm_fmov0_op)
+                                       func = fmov_op;
+                               else if ((insn.mm_fp1_format.op & 0x7f) ==
+                                        mm_fabs0_op)
+                                       func = fabs_op;
+                               else
+                                       func = fneg_op;
+                               mips32_insn.fp0_format.opcode = cop1_op;
+                               mips32_insn.fp0_format.fmt =
+                                       sdps_format[insn.mm_fp3_format.fmt];
+                               mips32_insn.fp0_format.ft = 0;
+                               mips32_insn.fp0_format.fs =
+                                       insn.mm_fp3_format.fs;
+                               mips32_insn.fp0_format.fd =
+                                       insn.mm_fp3_format.rt;
+                               mips32_insn.fp0_format.func = func;
+                               break;
+                       case mm_ffloorl_op:
+                       case mm_ffloorw_op:
+                       case mm_fceill_op:
+                       case mm_fceilw_op:
+                       case mm_ftruncl_op:
+                       case mm_ftruncw_op:
+                       case mm_froundl_op:
+                       case mm_froundw_op:
+                       case mm_fcvtl_op:
+                       case mm_fcvtw_op:
+                               if (insn.mm_fp1_format.op == mm_ffloorl_op)
+                                       func = ffloorl_op;
+                               else if (insn.mm_fp1_format.op == mm_ffloorw_op)
+                                       func = ffloor_op;
+                               else if (insn.mm_fp1_format.op == mm_fceill_op)
+                                       func = fceill_op;
+                               else if (insn.mm_fp1_format.op == mm_fceilw_op)
+                                       func = fceil_op;
+                               else if (insn.mm_fp1_format.op == mm_ftruncl_op)
+                                       func = ftruncl_op;
+                               else if (insn.mm_fp1_format.op == mm_ftruncw_op)
+                                       func = ftrunc_op;
+                               else if (insn.mm_fp1_format.op == mm_froundl_op)
+                                       func = froundl_op;
+                               else if (insn.mm_fp1_format.op == mm_froundw_op)
+                                       func = fround_op;
+                               else if (insn.mm_fp1_format.op == mm_fcvtl_op)
+                                       func = fcvtl_op;
+                               else
+                                       func = fcvtw_op;
+                               mips32_insn.fp0_format.opcode = cop1_op;
+                               mips32_insn.fp0_format.fmt =
+                                       sd_format[insn.mm_fp1_format.fmt];
+                               mips32_insn.fp0_format.ft = 0;
+                               mips32_insn.fp0_format.fs =
+                                       insn.mm_fp1_format.fs;
+                               mips32_insn.fp0_format.fd =
+                                       insn.mm_fp1_format.rt;
+                               mips32_insn.fp0_format.func = func;
+                               break;
+                       case mm_frsqrt_op:
+                       case mm_fsqrt_op:
+                       case mm_frecip_op:
+                               if (insn.mm_fp1_format.op == mm_frsqrt_op)
+                                       func = frsqrt_op;
+                               else if (insn.mm_fp1_format.op == mm_fsqrt_op)
+                                       func = fsqrt_op;
+                               else
+                                       func = frecip_op;
+                               mips32_insn.fp0_format.opcode = cop1_op;
+                               mips32_insn.fp0_format.fmt =
+                                       sdps_format[insn.mm_fp1_format.fmt];
+                               mips32_insn.fp0_format.ft = 0;
+                               mips32_insn.fp0_format.fs =
+                                       insn.mm_fp1_format.fs;
+                               mips32_insn.fp0_format.fd =
+                                       insn.mm_fp1_format.rt;
+                               mips32_insn.fp0_format.func = func;
+                               break;
+                       case mm_mfc1_op:
+                       case mm_mtc1_op:
+                       case mm_cfc1_op:
+                       case mm_ctc1_op:
+                               if (insn.mm_fp1_format.op == mm_mfc1_op)
+                                       op = mfc_op;
+                               else if (insn.mm_fp1_format.op == mm_mtc1_op)
+                                       op = mtc_op;
+                               else if (insn.mm_fp1_format.op == mm_cfc1_op)
+                                       op = cfc_op;
+                               else
+                                       op = ctc_op;
+                               mips32_insn.fp1_format.opcode = cop1_op;
+                               mips32_insn.fp1_format.op = op;
+                               mips32_insn.fp1_format.rt =
+                                       insn.mm_fp1_format.rt;
+                               mips32_insn.fp1_format.fs =
+                                       insn.mm_fp1_format.fs;
+                               mips32_insn.fp1_format.fd = 0;
+                               mips32_insn.fp1_format.func = 0;
+                               break;
+                       default:
+                               return SIGILL;
+                               break;
+                       }
+                       break;
+               case mm_32f_74_op:      /* c.cond.fmt */
+                       mips32_insn.fp0_format.opcode = cop1_op;
+                       mips32_insn.fp0_format.fmt =
+                               sdps_format[insn.mm_fp4_format.fmt];
+                       mips32_insn.fp0_format.ft = insn.mm_fp4_format.rt;
+                       mips32_insn.fp0_format.fs = insn.mm_fp4_format.fs;
+                       mips32_insn.fp0_format.fd = insn.mm_fp4_format.cc << 2;
+                       mips32_insn.fp0_format.func =
+                               insn.mm_fp4_format.cond | MM_MIPS32_COND_FC;
+                       break;
+               default:
+                       return SIGILL;
+                       break;
+               }
+               break;
+       default:
+               return SIGILL;
+               break;
+       }
+
+       *insn_ptr = mips32_insn;
+       return 0;
+}
+
+int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+                    unsigned long *contpc)
+{
+       union mips_instruction insn = (union mips_instruction)dec_insn.insn;
+       int bc_false = 0;
+       unsigned int fcr31;
+       unsigned int bit;
+
+       switch (insn.mm_i_format.opcode) {
+       case mm_pool32a_op:
+               if ((insn.mm_i_format.simmediate & MM_POOL32A_MINOR_MASK) ==
+                   mm_pool32axf_op) {
+                       switch (insn.mm_i_format.simmediate >>
+                               MM_POOL32A_MINOR_SHIFT) {
+                       case mm_jalr_op:
+                       case mm_jalrhb_op:
+                       case mm_jalrs_op:
+                       case mm_jalrshb_op:
+                               if (insn.mm_i_format.rt != 0)   /* Not mm_jr */
+                                       regs->regs[insn.mm_i_format.rt] =
+                                               regs->cp0_epc +
+                                               dec_insn.pc_inc +
+                                               dec_insn.next_pc_inc;
+                               *contpc = regs->regs[insn.mm_i_format.rs];
+                               return 1;
+                               break;
+                       }
+               }
+               break;
+       case mm_pool32i_op:
+               switch (insn.mm_i_format.rt) {
+               case mm_bltzals_op:
+               case mm_bltzal_op:
+                       regs->regs[31] = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               dec_insn.next_pc_inc;
+                       /* Fall through */
+               case mm_bltz_op:
+                       if ((long)regs->regs[insn.mm_i_format.rs] < 0)
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       (insn.mm_i_format.simmediate << 1);
+                       else
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       dec_insn.next_pc_inc;
+                       return 1;
+                       break;
+               case mm_bgezals_op:
+               case mm_bgezal_op:
+                       regs->regs[31] = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       dec_insn.next_pc_inc;
+                       /* Fall through */
+               case mm_bgez_op:
+                       if ((long)regs->regs[insn.mm_i_format.rs] >= 0)
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       (insn.mm_i_format.simmediate << 1);
+                       else
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       dec_insn.next_pc_inc;
+                       return 1;
+                       break;
+               case mm_blez_op:
+                       if ((long)regs->regs[insn.mm_i_format.rs] <= 0)
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       (insn.mm_i_format.simmediate << 1);
+                       else
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       dec_insn.next_pc_inc;
+                       return 1;
+                       break;
+               case mm_bgtz_op:
+                       if ((long)regs->regs[insn.mm_i_format.rs] > 0)
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       (insn.mm_i_format.simmediate << 1);
+                       else
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       dec_insn.next_pc_inc;
+                       return 1;
+                       break;
+               case mm_bc2f_op:
+               case mm_bc1f_op:
+                       bc_false = 1;
+                       /* Fall through */
+               case mm_bc2t_op:
+               case mm_bc1t_op:
+                       preempt_disable();
+                       if (is_fpu_owner())
+                               asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
+                       else
+                               fcr31 = current->thread.fpu.fcr31;
+                       preempt_enable();
+
+                       if (bc_false)
+                               fcr31 = ~fcr31;
+
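+                       /* FCSR condition bits: cc0 is bit 23, cc1-cc7 are bits 25-31. */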
+                       bit = (insn.mm_i_format.rs >> 2);
+                       bit += (bit != 0);
+                       bit += 23;
+                       if (fcr31 & (1 << bit))
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       (insn.mm_i_format.simmediate << 1);
+                       else
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc + dec_insn.next_pc_inc;
+                       return 1;
+                       break;
+               }
+               break;
+       case mm_pool16c_op:
+               switch (insn.mm_i_format.rt) {
+               case mm_jalr16_op:
+               case mm_jalrs16_op:
+                       regs->regs[31] = regs->cp0_epc +
+                               dec_insn.pc_inc + dec_insn.next_pc_inc;
+                       /* Fall through */
+               case mm_jr16_op:
+                       *contpc = regs->regs[insn.mm_i_format.rs];
+                       return 1;
+                       break;
+               }
+               break;
+       case mm_beqz16_op:
+               if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] == 0)
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               (insn.mm_b1_format.simmediate << 1);
+               else
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc + dec_insn.next_pc_inc;
+               return 1;
+               break;
+       case mm_bnez16_op:
+               if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] != 0)
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               (insn.mm_b1_format.simmediate << 1);
+               else
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc + dec_insn.next_pc_inc;
+               return 1;
+               break;
+       case mm_b16_op:
+               *contpc = regs->cp0_epc + dec_insn.pc_inc +
+                        (insn.mm_b0_format.simmediate << 1);
+               return 1;
+               break;
+       case mm_beq32_op:
+               if (regs->regs[insn.mm_i_format.rs] ==
+                   regs->regs[insn.mm_i_format.rt])
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               (insn.mm_i_format.simmediate << 1);
+               else
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               dec_insn.next_pc_inc;
+               return 1;
+               break;
+       case mm_bne32_op:
+               if (regs->regs[insn.mm_i_format.rs] !=
+                   regs->regs[insn.mm_i_format.rt])
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               (insn.mm_i_format.simmediate << 1);
+               else
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc + dec_insn.next_pc_inc;
+               return 1;
+               break;
+       case mm_jalx32_op:
+               regs->regs[31] = regs->cp0_epc +
+                       dec_insn.pc_inc + dec_insn.next_pc_inc;
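+               /* Keep the top 4 PC bits; the 26-bit target << 2 fills the rest. */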
+               *contpc = regs->cp0_epc + dec_insn.pc_inc;
+               *contpc >>= 28;
+               *contpc <<= 28;
+               *contpc |= (insn.j_format.target << 2);
+               return 1;
+               break;
+       case mm_jals32_op:
+       case mm_jal32_op:
+               regs->regs[31] = regs->cp0_epc +
+                       dec_insn.pc_inc + dec_insn.next_pc_inc;
+               /* Fall through */
+       case mm_j32_op:
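+               /* Halfword-aligned microMIPS target (<< 1); keep the top 5 PC bits. */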
+               *contpc = regs->cp0_epc + dec_insn.pc_inc;
+               *contpc >>= 27;
+               *contpc <<= 27;
+               *contpc |= (insn.j_format.target << 1);
+               set_isa16_mode(*contpc);
+               return 1;
+               break;
+       }
+       return 0;
+}
 
 /*
  * Redundant with logic already in kernel/branch.c,
@@ -117,53 +673,177 @@ static const unsigned int fpucondbit[8] = {
  * a single subroutine should be used across both
  * modules.
  */
-static int isBranchInstr(mips_instruction * i)
+static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+                        unsigned long *contpc)
 {
-       switch (MIPSInst_OPCODE(*i)) {
+       union mips_instruction insn = (union mips_instruction)dec_insn.insn;
+       unsigned int fcr31;
+       unsigned int bit = 0;
+
+       switch (insn.i_format.opcode) {
        case spec_op:
-               switch (MIPSInst_FUNC(*i)) {
+               switch (insn.r_format.func) {
                case jalr_op:
+                       regs->regs[insn.r_format.rd] =
+                               regs->cp0_epc + dec_insn.pc_inc +
+                               dec_insn.next_pc_inc;
+                       /* Fall through */
                case jr_op:
+                       *contpc = regs->regs[insn.r_format.rs];
                        return 1;
+                       break;
                }
                break;
-
        case bcond_op:
-               switch (MIPSInst_RT(*i)) {
+               switch (insn.i_format.rt) {
+               case bltzal_op:
+               case bltzall_op:
+                       regs->regs[31] = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               dec_insn.next_pc_inc;
+                       /* Fall through */
                case bltz_op:
-               case bgez_op:
                case bltzl_op:
-               case bgezl_op:
-               case bltzal_op:
+                       if ((long)regs->regs[insn.i_format.rs] < 0)
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       (insn.i_format.simmediate << 2);
+                       else
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       dec_insn.next_pc_inc;
+                       return 1;
+                       break;
                case bgezal_op:
-               case bltzall_op:
                case bgezall_op:
+                       regs->regs[31] = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               dec_insn.next_pc_inc;
+                       /* Fall through */
+               case bgez_op:
+               case bgezl_op:
+                       if ((long)regs->regs[insn.i_format.rs] >= 0)
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       (insn.i_format.simmediate << 2);
+                       else
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       dec_insn.next_pc_inc;
                        return 1;
+                       break;
                }
                break;
-
-       case j_op:
-       case jal_op:
        case jalx_op:
+               set_isa16_mode(bit);
+       case jal_op:
+               regs->regs[31] = regs->cp0_epc +
+                       dec_insn.pc_inc +
+                       dec_insn.next_pc_inc;
+               /* Fall through */
+       case j_op:
+               *contpc = regs->cp0_epc + dec_insn.pc_inc;
+               *contpc >>= 28;
+               *contpc <<= 28;
+               *contpc |= (insn.j_format.target << 2);
+               /* Set microMIPS mode bit: XOR for jalx. */
+               *contpc ^= bit;
+               return 1;
+               break;
        case beq_op:
-       case bne_op:
-       case blez_op:
-       case bgtz_op:
        case beql_op:
+               if (regs->regs[insn.i_format.rs] ==
+                   regs->regs[insn.i_format.rt])
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               (insn.i_format.simmediate << 2);
+               else
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               dec_insn.next_pc_inc;
+               return 1;
+               break;
+       case bne_op:
        case bnel_op:
+               if (regs->regs[insn.i_format.rs] !=
+                   regs->regs[insn.i_format.rt])
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               (insn.i_format.simmediate << 2);
+               else
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               dec_insn.next_pc_inc;
+               return 1;
+               break;
+       case blez_op:
        case blezl_op:
+               if ((long)regs->regs[insn.i_format.rs] <= 0)
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               (insn.i_format.simmediate << 2);
+               else
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               dec_insn.next_pc_inc;
+               return 1;
+               break;
+       case bgtz_op:
        case bgtzl_op:
+               if ((long)regs->regs[insn.i_format.rs] > 0)
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               (insn.i_format.simmediate << 2);
+               else
+                       *contpc = regs->cp0_epc +
+                               dec_insn.pc_inc +
+                               dec_insn.next_pc_inc;
                return 1;
-
+               break;
        case cop0_op:
        case cop1_op:
        case cop2_op:
        case cop1x_op:
-               if (MIPSInst_RS(*i) == bc_op)
-                       return 1;
+               if (insn.i_format.rs == bc_op) {
+                       preempt_disable();
+                       if (is_fpu_owner())
+                               asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
+                       else
+                               fcr31 = current->thread.fpu.fcr31;
+                       preempt_enable();
+
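+                       /* FCSR condition bits: cc0 is bit 23, cc1-cc7 are bits 25-31. */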
+                       bit = (insn.i_format.rt >> 2);
+                       bit += (bit != 0);
+                       bit += 23;
+                       switch (insn.i_format.rt & 3) {
+                       case 0: /* bc1f */
+                       case 2: /* bc1fl */
+                               if (~fcr31 & (1 << bit))
+                                       *contpc = regs->cp0_epc +
+                                               dec_insn.pc_inc +
+                                               (insn.i_format.simmediate << 2);
+                               else
+                                       *contpc = regs->cp0_epc +
+                                               dec_insn.pc_inc +
+                                               dec_insn.next_pc_inc;
+                               return 1;
+                               break;
+                       case 1: /* bc1t */
+                       case 3: /* bc1tl */
+                               if (fcr31 & (1 << bit))
+                                       *contpc = regs->cp0_epc +
+                                               dec_insn.pc_inc +
+                                               (insn.i_format.simmediate << 2);
+                               else
+                                       *contpc = regs->cp0_epc +
+                                               dec_insn.pc_inc +
+                                               dec_insn.next_pc_inc;
+                               return 1;
+                               break;
+                       }
+               }
                break;
        }
-
        return 0;
 }
 
@@ -210,26 +890,23 @@ static inline int cop1_64bit(struct pt_regs *xcp)
  */
 
 static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
-                      void *__user *fault_addr)
+               struct mm_decoded_insn dec_insn, void *__user *fault_addr)
 {
        mips_instruction ir;
-       unsigned long emulpc, contpc;
+       unsigned long contpc = xcp->cp0_epc + dec_insn.pc_inc;
        unsigned int cond;
-
-       if (!access_ok(VERIFY_READ, xcp->cp0_epc, sizeof(mips_instruction))) {
-               MIPS_FPU_EMU_INC_STATS(errors);
-               *fault_addr = (mips_instruction __user *)xcp->cp0_epc;
-               return SIGBUS;
-       }
-       if (__get_user(ir, (mips_instruction __user *) xcp->cp0_epc)) {
-               MIPS_FPU_EMU_INC_STATS(errors);
-               *fault_addr = (mips_instruction __user *)xcp->cp0_epc;
-               return SIGSEGV;
-       }
+       int pc_inc;
 
        /* XXX NEC Vr54xx bug workaround */
-       if ((xcp->cp0_cause & CAUSEF_BD) && !isBranchInstr(&ir))
-               xcp->cp0_cause &= ~CAUSEF_BD;
+       if (xcp->cp0_cause & CAUSEF_BD) {
+               if (dec_insn.micro_mips_mode) {
+                       if (!mm_isBranchInstr(xcp, dec_insn, &contpc))
+                               xcp->cp0_cause &= ~CAUSEF_BD;
+               } else {
+                       if (!isBranchInstr(xcp, dec_insn, &contpc))
+                               xcp->cp0_cause &= ~CAUSEF_BD;
+               }
+       }
 
        if (xcp->cp0_cause & CAUSEF_BD) {
                /*
@@ -244,32 +921,33 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
                 * Linux MIPS branch emulator operates on context, updating the
                 * cp0_epc.
                 */
-               emulpc = xcp->cp0_epc + 4;      /* Snapshot emulation target */
+               ir = dec_insn.next_insn;  /* process delay slot instr */
+               pc_inc = dec_insn.next_pc_inc;
+       } else {
+               ir = dec_insn.insn;       /* process current instr */
+               pc_inc = dec_insn.pc_inc;
+       }
 
-               if (__compute_return_epc(xcp) < 0) {
-#ifdef CP1DBG
-                       printk("failed to emulate branch at %p\n",
-                               (void *) (xcp->cp0_epc));
-#endif
+       /*
+        * Since microMIPS FPU instructions are a subset of MIPS32 FPU
+        * instructions, we want to convert microMIPS FPU instructions
+        * into MIPS32 instructions so that we can reuse all of the
+        * FPU emulation code.
+        *
+        * NOTE: We cannot do this for branch instructions since they
+        *       are not a subset. Example: Cannot emulate a 16-bit
+        *       aligned target address with a MIPS32 instruction.
+        */
+       if (dec_insn.micro_mips_mode) {
+               /*
+                * If the next instruction is a 16-bit instruction, then
+                * it cannot be an FPU instruction. This could happen
+                * since we can be called for non-FPU instructions.
+                */
+               if ((pc_inc == 2) ||
+                       (microMIPS32_to_MIPS32((union mips_instruction *)&ir)
+                        == SIGILL))
                        return SIGILL;
-               }
-               if (!access_ok(VERIFY_READ, emulpc, sizeof(mips_instruction))) {
-                       MIPS_FPU_EMU_INC_STATS(errors);
-                       *fault_addr = (mips_instruction __user *)emulpc;
-                       return SIGBUS;
-               }
-               if (__get_user(ir, (mips_instruction __user *) emulpc)) {
-                       MIPS_FPU_EMU_INC_STATS(errors);
-                       *fault_addr = (mips_instruction __user *)emulpc;
-                       return SIGSEGV;
-               }
-               /* __compute_return_epc() will have updated cp0_epc */
-               contpc = xcp->cp0_epc;
-               /* In order not to confuse ptrace() et al, tweak context */
-               xcp->cp0_epc = emulpc - 4;
-       } else {
-               emulpc = xcp->cp0_epc;
-               contpc = xcp->cp0_epc + 4;
        }
 
       emul:
@@ -474,22 +1152,35 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
                                /* branch taken: emulate dslot
                                 * instruction
                                 */
-                               xcp->cp0_epc += 4;
-                               contpc = (xcp->cp0_epc +
-                                       (MIPSInst_SIMM(ir) << 2));
-
-                               if (!access_ok(VERIFY_READ, xcp->cp0_epc,
-                                              sizeof(mips_instruction))) {
-                                       MIPS_FPU_EMU_INC_STATS(errors);
-                                       *fault_addr = (mips_instruction __user *)xcp->cp0_epc;
-                                       return SIGBUS;
-                               }
-                               if (__get_user(ir,
-                                   (mips_instruction __user *) xcp->cp0_epc)) {
-                                       MIPS_FPU_EMU_INC_STATS(errors);
-                                       *fault_addr = (mips_instruction __user *)xcp->cp0_epc;
-                                       return SIGSEGV;
-                               }
+                               xcp->cp0_epc += dec_insn.pc_inc;
+
+                               contpc = MIPSInst_SIMM(ir);
+                               ir = dec_insn.next_insn;
+                               if (dec_insn.micro_mips_mode) {
+                                       contpc = (xcp->cp0_epc + (contpc << 1));
+
+                                       /* If 16-bit instruction, not FPU. */
+                                       if ((dec_insn.next_pc_inc == 2) ||
+                                               (microMIPS32_to_MIPS32((union mips_instruction *)&ir) == SIGILL)) {
+
+                                               /*
+                                                * Since this instruction will
+                                                * be put on the stack with
+                                                * 32-bit words, get around
+                                                * this problem by putting a
+                                                * NOP16 as the second one.
+                                                */
+                                               if (dec_insn.next_pc_inc == 2)
+                                                       ir = (ir & (~0xffff)) | MM_NOP16;
+
+                                               /*
+                                                * Single step the non-CP1
+                                                * instruction in the dslot.
+                                                */
+                                               return mips_dsemul(xcp, ir, contpc);
+                                       }
+                               } else
+                                       contpc = (xcp->cp0_epc + (contpc << 2));
 
                                switch (MIPSInst_OPCODE(ir)) {
                                case lwc1_op:
@@ -525,8 +1216,8 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
                                         * branch likely nullifies
                                         * dslot if not taken
                                         */
-                                       xcp->cp0_epc += 4;
-                                       contpc += 4;
+                                       xcp->cp0_epc += dec_insn.pc_inc;
+                                       contpc += dec_insn.pc_inc;
                                        /*
                                         * else continue & execute
                                         * dslot as normal insn
@@ -1313,25 +2004,75 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
        int has_fpu, void *__user *fault_addr)
 {
        unsigned long oldepc, prevepc;
-       mips_instruction insn;
+       struct mm_decoded_insn dec_insn;
+       u16 instr[4];
+       u16 *instr_ptr;
        int sig = 0;
 
        oldepc = xcp->cp0_epc;
        do {
                prevepc = xcp->cp0_epc;
 
-               if (!access_ok(VERIFY_READ, xcp->cp0_epc, sizeof(mips_instruction))) {
-                       MIPS_FPU_EMU_INC_STATS(errors);
-                       *fault_addr = (mips_instruction __user *)xcp->cp0_epc;
-                       return SIGBUS;
-               }
-               if (__get_user(insn, (mips_instruction __user *) xcp->cp0_epc)) {
-                       MIPS_FPU_EMU_INC_STATS(errors);
-                       *fault_addr = (mips_instruction __user *)xcp->cp0_epc;
-                       return SIGSEGV;
+               if (get_isa16_mode(prevepc) && cpu_has_mmips) {
+                       /*
+                        * Get next 2 microMIPS instructions and convert them
+                        * into 32-bit instructions.
+                        */
+                       if ((get_user(instr[0], (u16 __user *)msk_isa16_mode(xcp->cp0_epc))) ||
+                           (get_user(instr[1], (u16 __user *)msk_isa16_mode(xcp->cp0_epc + 2))) ||
+                           (get_user(instr[2], (u16 __user *)msk_isa16_mode(xcp->cp0_epc + 4))) ||
+                           (get_user(instr[3], (u16 __user *)msk_isa16_mode(xcp->cp0_epc + 6)))) {
+                               MIPS_FPU_EMU_INC_STATS(errors);
+                               return SIGBUS;
+                       }
+                       instr_ptr = instr;
+
+                       /* Get first instruction. */
+                       if (mm_insn_16bit(*instr_ptr)) {
+                               /* Duplicate the half-word. */
+                               dec_insn.insn = (*instr_ptr << 16) |
+                                       (*instr_ptr);
+                               /* 16-bit instruction. */
+                               dec_insn.pc_inc = 2;
+                               instr_ptr += 1;
+                       } else {
+                               dec_insn.insn = (*instr_ptr << 16) |
+                                       *(instr_ptr+1);
+                               /* 32-bit instruction. */
+                               dec_insn.pc_inc = 4;
+                               instr_ptr += 2;
+                       }
+                       /* Get second instruction. */
+                       if (mm_insn_16bit(*instr_ptr)) {
+                               /* Duplicate the half-word. */
+                               dec_insn.next_insn = (*instr_ptr << 16) |
+                                       (*instr_ptr);
+                               /* 16-bit instruction. */
+                               dec_insn.next_pc_inc = 2;
+                       } else {
+                               dec_insn.next_insn = (*instr_ptr << 16) |
+                                       *(instr_ptr+1);
+                               /* 32-bit instruction. */
+                               dec_insn.next_pc_inc = 4;
+                       }
+                       dec_insn.micro_mips_mode = 1;
+               } else {
+                       if ((get_user(dec_insn.insn,
+                           (mips_instruction __user *) xcp->cp0_epc)) ||
+                           (get_user(dec_insn.next_insn,
+                           (mips_instruction __user *)(xcp->cp0_epc+4)))) {
+                               MIPS_FPU_EMU_INC_STATS(errors);
+                               return SIGBUS;
+                       }
+                       dec_insn.pc_inc = 4;
+                       dec_insn.next_pc_inc = 4;
+                       dec_insn.micro_mips_mode = 0;
                }
-               if (insn == 0)
-                       xcp->cp0_epc += 4;      /* skip nops */
+
+               if ((dec_insn.insn == 0) ||
+                  ((dec_insn.pc_inc == 2) &&
+                  ((dec_insn.insn & 0xffff) == MM_NOP16)))
+                       xcp->cp0_epc += dec_insn.pc_inc;        /* Skip NOPs */
                else {
                        /*
                         * The 'ieee754_csr' is an alias of
@@ -1341,7 +2082,7 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
                         */
                        /* convert to ieee library modes */
                        ieee754_csr.rm = ieee_rm[ieee754_csr.rm];
-                       sig = cop1Emulate(xcp, ctx, fault_addr);
+                       sig = cop1Emulate(xcp, ctx, dec_insn, fault_addr);
                        /* revert to mips rounding mode */
                        ieee754_csr.rm = mips_rm[ieee754_csr.rm];
                }
index 384a3b0091ea33e507fe063c0b8b43a5ab714823..7ea622ab8dad32df6bf1fbd8d07bffdbf48c3f72 100644 (file)
@@ -55,7 +55,9 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
        struct emuframe __user *fr;
        int err;
 
-       if (ir == 0) {          /* a nop is easy */
+       if ((get_isa16_mode(regs->cp0_epc) && ((ir >> 16) == MM_NOP16)) ||
+               (ir == 0)) {
+               /* NOP is easy */
                regs->cp0_epc = cpc;
                regs->cp0_cause &= ~CAUSEF_BD;
                return 0;
@@ -91,8 +93,16 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
        if (unlikely(!access_ok(VERIFY_WRITE, fr, sizeof(struct emuframe))))
                return SIGBUS;
 
-       err = __put_user(ir, &fr->emul);
-       err |= __put_user((mips_instruction)BREAK_MATH, &fr->badinst);
+       if (get_isa16_mode(regs->cp0_epc)) {
+               err = __put_user(ir >> 16, (u16 __user *)(&fr->emul));
+               err |= __put_user(ir & 0xffff, (u16 __user *)((long)(&fr->emul) + 2));
+               err |= __put_user(BREAK_MATH >> 16, (u16 __user *)(&fr->badinst));
+               err |= __put_user(BREAK_MATH & 0xffff, (u16 __user *)((long)(&fr->badinst) + 2));
+       } else {
+               err = __put_user(ir, &fr->emul);
+               err |= __put_user((mips_instruction)BREAK_MATH, &fr->badinst);
+       }
+
        err |= __put_user((mips_instruction)BD_COOKIE, &fr->cookie);
        err |= __put_user(cpc, &fr->epc);
 
@@ -101,7 +111,8 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
                return SIGBUS;
        }
 
-       regs->cp0_epc = (unsigned long) &fr->emul;
+       regs->cp0_epc = ((unsigned long) &fr->emul) |
+               get_isa16_mode(regs->cp0_epc);
 
        flush_cache_sigtramp((unsigned long)&fr->badinst);
 
@@ -114,9 +125,10 @@ int do_dsemulret(struct pt_regs *xcp)
        unsigned long epc;
        u32 insn, cookie;
        int err = 0;
+       u16 instr[2];
 
        fr = (struct emuframe __user *)
-               (xcp->cp0_epc - sizeof(mips_instruction));
+               (msk_isa16_mode(xcp->cp0_epc) - sizeof(mips_instruction));
 
        /*
         * If we can't even access the area, something is very wrong, but we'll
@@ -131,7 +143,13 @@ int do_dsemulret(struct pt_regs *xcp)
         *  - Is the instruction pointed to by the EPC a BREAK_MATH?
         *  - Is the following memory word the BD_COOKIE?
         */
-       err = __get_user(insn, &fr->badinst);
+       if (get_isa16_mode(xcp->cp0_epc)) {
+               err = __get_user(instr[0], (u16 __user *)(&fr->badinst));
+               err |= __get_user(instr[1], (u16 __user *)((long)(&fr->badinst) + 2));
+               insn = (instr[0] << 16) | instr[1];
+       } else {
+               err = __get_user(insn, &fr->badinst);
+       }
        err |= __get_user(cookie, &fr->cookie);
 
        if (unlikely(err || (insn != BREAK_MATH) || (cookie != BD_COOKIE))) {
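The microMIPS path above stores each 32-bit instruction of the emulation frame as two 16-bit writes, high halfword first, and do_dsemulret() reassembles it the same way. A minimal user-space sketch of that ordering (store_mm_insn/load_mm_insn are illustrative names, not kernel helpers, and the example instruction word is arbitrary):

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: a 32-bit microMIPS instruction is kept as two
 * 16-bit units with the major-opcode halfword at the lower address,
 * so the instruction stream stays valid regardless of how a single
 * 32-bit store would lay out its bytes.
 */
static void store_mm_insn(uint16_t *slot, uint32_t insn)
{
        slot[0] = insn >> 16;           /* first halfword: major opcode */
        slot[1] = insn & 0xffff;        /* second halfword */
}

static uint32_t load_mm_insn(const uint16_t *slot)
{
        return ((uint32_t)slot[0] << 16) | slot[1];
}

int main(void)
{
        uint16_t frame[2];
        uint32_t ir = 0x54430000;       /* arbitrary example word */

        store_mm_insn(frame, ir);
        printf("round trip ok: %d\n", load_mm_insn(frame) == ir);
        return 0;
}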
index 1dcec30ad1c43b7c5e6e06c61f5c46cf04df7eac..e87aae1f2e802835d9a2730b5378d2bd29b8b54f 100644 (file)
@@ -4,7 +4,7 @@
 
 obj-y                          += cache.o dma-default.o extable.o fault.o \
                                   gup.o init.o mmap.o page.o page-funcs.o \
-                                  tlbex.o tlbex-fault.o uasm.o
+                                  tlbex.o tlbex-fault.o uasm-mips.o
 
 obj-$(CONFIG_32BIT)            += ioremap.o pgtable-32.o
 obj-$(CONFIG_64BIT)            += pgtable-64.o
@@ -22,3 +22,5 @@ obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o
 obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o
 obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o
 obj-$(CONFIG_MIPS_CPU_SCACHE)  += sc-mips.o
+
+obj-$(CONFIG_SYS_SUPPORTS_MICROMIPS) += uasm-micromips.o
index 2078915eacb9c9ff740ec2daa1a65274ad1de7a9..21813beec7a56f8c17ff493ca6000324168850fe 100644 (file)
@@ -33,6 +33,7 @@
 #include <asm/war.h>
 #include <asm/cacheflush.h> /* for run_uncached() */
 #include <asm/traps.h>
+#include <asm/dma-coherence.h>
 
 /*
  * Special Variant of smp_call_function for use by cache functions:
@@ -136,7 +137,8 @@ static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
                r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
 }
 
-static void (* r4k_blast_dcache)(void);
+void (* r4k_blast_dcache)(void);
+EXPORT_SYMBOL(r4k_blast_dcache);
 
 static void __cpuinit r4k_blast_dcache_setup(void)
 {
@@ -264,7 +266,8 @@ static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
                r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
 }
 
-static void (* r4k_blast_icache)(void);
+void (* r4k_blast_icache)(void);
+EXPORT_SYMBOL(r4k_blast_icache);
 
 static void __cpuinit r4k_blast_icache_setup(void)
 {
@@ -1377,20 +1380,6 @@ static void __cpuinit coherency_setup(void)
        }
 }
 
-#if defined(CONFIG_DMA_NONCOHERENT)
-
-static int __cpuinitdata coherentio;
-
-static int __init setcoherentio(char *str)
-{
-       coherentio = 1;
-
-       return 0;
-}
-
-early_param("coherentio", setcoherentio);
-#endif
-
 static void __cpuinit r4k_cache_error_setup(void)
 {
        extern char __weak except_vec2_generic;
@@ -1472,9 +1461,14 @@ void __cpuinit r4k_cache_init(void)
 
        build_clear_page();
        build_copy_page();
-#if !defined(CONFIG_MIPS_CMP)
+
+       /*
+        * We want to run CMP kernels on cores with and without coherent
+        * caches. Therefore, do not use CONFIG_MIPS_CMP to decide whether
+        * or not to flush caches.
+        */
        local_r4k___flush_cache_all(NULL);
-#endif
+
        coherency_setup();
        board_cache_error_setup = r4k_cache_error_setup;
 }
index 07cec4407b0c00cf073f406738256bd363913dcf..5aeb3eb0b72f87b5f108a6d287517e53d038be2a 100644 (file)
@@ -48,6 +48,7 @@ void (*flush_icache_all)(void);
 
 EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
 EXPORT_SYMBOL(flush_data_cache_page);
+EXPORT_SYMBOL(flush_icache_all);
 
 #ifdef CONFIG_DMA_NONCOHERENT
 
index f9ef83829a523fd65803b3f5b2c8fdfe77e31a8f..caf92ecb37d6966639c47e6eb277d74cb42f2012 100644 (file)
 
 #include <dma-coherence.h>
 
+int coherentio = 0;    /* User defined DMA coherency from command line. */
+EXPORT_SYMBOL_GPL(coherentio);
+int hw_coherentio = 0; /* Actual hardware supported DMA coherency setting. */
+
+static int __init setcoherentio(char *str)
+{
+       coherentio = 1;
+       pr_info("Hardware DMA cache coherency (command line)\n");
+       return 0;
+}
+early_param("coherentio", setcoherentio);
+
+static int __init setnocoherentio(char *str)
+{
+       coherentio = 0;
+       pr_info("Software DMA cache coherency (command line)\n");
+       return 0;
+}
+early_param("nocoherentio", setnocoherentio);
+
 static inline struct page *dma_addr_to_page(struct device *dev,
        dma_addr_t dma_addr)
 {
@@ -115,7 +135,8 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
 
                if (!plat_device_is_coherent(dev)) {
                        dma_cache_wback_inv((unsigned long) ret, size);
-                       ret = UNCAC_ADDR(ret);
+                       if (!hw_coherentio)
+                               ret = UNCAC_ADDR(ret);
                }
        }
 
@@ -142,7 +163,7 @@ static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 
        plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
 
-       if (!plat_device_is_coherent(dev))
+       if (!plat_device_is_coherent(dev) && !hw_coherentio)
                addr = CAC_ADDR(addr);
 
        free_pages(addr, get_order(size));
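On a 32-bit non-coherent MIPS kernel, UNCAC_ADDR()/CAC_ADDR() switch between the cached KSEG0 and uncached KSEG1 aliases of the same memory; the change above skips that switch when hw_coherentio reports that the hardware keeps DMA coherent by itself. A rough stand-alone sketch of the 32-bit case (dma_cpu_addr() is a hypothetical helper; 64-bit kernels use a different mapping):

#include <stdint.h>
#include <stdio.h>

/* 32-bit MIPS kernel segments (assumed): KSEG0 is cached, KSEG1 is an
 * uncached alias of the same physical memory. */
#define KSEG0   0x80000000u
#define KSEG1   0xa0000000u

/* Hypothetical helper mirroring the hunk above: only fall back to the
 * uncached alias when the hardware cannot keep DMA coherent itself. */
static uint32_t dma_cpu_addr(uint32_t cached, int hw_coherentio)
{
        return hw_coherentio ? cached : cached - KSEG0 + KSEG1;
}

int main(void)
{
        printf("0x%08x\n", dma_cpu_addr(0x80400000u, 0));      /* 0xa0400000 */
        printf("0x%08x\n", dma_cpu_addr(0x80400000u, 1));      /* 0x80400000 */
        return 0;
}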
index a29fba55b53e79c8d7399d67f101356379f20c73..4eb8dcfaf1ce1953760059faf151d54fe135d6a4 100644 (file)
@@ -247,6 +247,11 @@ void __cpuinit build_clear_page(void)
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
        int i;
+       static atomic_t run_once = ATOMIC_INIT(0);
+
+       if (atomic_xchg(&run_once, 1)) {
+               return;
+       }
 
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));
@@ -389,6 +394,11 @@ void __cpuinit build_copy_page(void)
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
        int i;
+       static atomic_t run_once = ATOMIC_INIT(0);
+
+       if (atomic_xchg(&run_once, 1)) {
+               return;
+       }
 
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));
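The run_once guard added to build_clear_page()/build_copy_page() above uses atomic_xchg() so the code templates are generated exactly once even if every CPU calls the function. The same idea in plain C11 atomics (build_once() is just an illustrative stand-in):

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative run-once guard: the first caller reads back 0 and does
 * the work; every later (or concurrent) caller reads back 1 and bails. */
static void build_once(void)
{
        static atomic_int run_once;

        if (atomic_exchange(&run_once, 1))
                return;

        puts("generating page-clear/copy templates (first call only)");
}

int main(void)
{
        build_once();
        build_once();           /* prints nothing the second time */
        return 0;
}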
index a63d1ed0827fefe36520b2d21877b5bd6a6767f4..4a13c150f31b18d3317c9e0e7e12ffa39bb561e4 100644 (file)
@@ -51,7 +51,7 @@ void local_flush_tlb_all(void)
 #endif
 
        local_irq_save(flags);
-       old_ctx = read_c0_entryhi() & ASID_MASK;
+       old_ctx = ASID_MASK(read_c0_entryhi());
        write_c0_entrylo0(0);
        entry = r3k_have_wired_reg ? read_c0_wired() : 8;
        for (; entry < current_cpu_data.tlbsize; entry++) {
@@ -87,13 +87,13 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 
 #ifdef DEBUG_TLB
                printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
-                       cpu_context(cpu, mm) & ASID_MASK, start, end);
+                       ASID_MASK(cpu_context(cpu, mm)), start, end);
 #endif
                local_irq_save(flags);
                size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
                if (size <= current_cpu_data.tlbsize) {
-                       int oldpid = read_c0_entryhi() & ASID_MASK;
-                       int newpid = cpu_context(cpu, mm) & ASID_MASK;
+                       int oldpid = ASID_MASK(read_c0_entryhi());
+                       int newpid = ASID_MASK(cpu_context(cpu, mm));
 
                        start &= PAGE_MASK;
                        end += PAGE_SIZE - 1;
@@ -166,10 +166,10 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 #ifdef DEBUG_TLB
                printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
 #endif
-               newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK;
+               newpid = ASID_MASK(cpu_context(cpu, vma->vm_mm));
                page &= PAGE_MASK;
                local_irq_save(flags);
-               oldpid = read_c0_entryhi() & ASID_MASK;
+               oldpid = ASID_MASK(read_c0_entryhi());
                write_c0_entryhi(page | newpid);
                BARRIER;
                tlb_probe();
@@ -197,10 +197,10 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
        if (current->active_mm != vma->vm_mm)
                return;
 
-       pid = read_c0_entryhi() & ASID_MASK;
+       pid = ASID_MASK(read_c0_entryhi());
 
 #ifdef DEBUG_TLB
-       if ((pid != (cpu_context(cpu, vma->vm_mm) & ASID_MASK)) || (cpu_context(cpu, vma->vm_mm) == 0)) {
+       if ((pid != ASID_MASK(cpu_context(cpu, vma->vm_mm))) || (cpu_context(cpu, vma->vm_mm) == 0)) {
                printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n",
                       (cpu_context(cpu, vma->vm_mm)), pid);
        }
@@ -241,7 +241,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 
                local_irq_save(flags);
                /* Save old context and create impossible VPN2 value */
-               old_ctx = read_c0_entryhi() & ASID_MASK;
+               old_ctx = ASID_MASK(read_c0_entryhi());
                old_pagemask = read_c0_pagemask();
                w = read_c0_wired();
                write_c0_wired(w + 1);
@@ -264,7 +264,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 #endif
 
                local_irq_save(flags);
-               old_ctx = read_c0_entryhi() & ASID_MASK;
+               old_ctx = ASID_MASK(read_c0_entryhi());
                write_c0_entrylo0(entrylo0);
                write_c0_entryhi(entryhi);
                write_c0_index(wired);
index 493131c81a29b9a1c45bf44a000596ccdf17035f..09653b290d53356517607ac51388a09412e3d033 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/smp.h>
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
+#include <linux/module.h>
 
 #include <asm/cpu.h>
 #include <asm/bootinfo.h>
@@ -94,6 +95,7 @@ void local_flush_tlb_all(void)
        FLUSH_ITLB;
        EXIT_CRITICAL(flags);
 }
+EXPORT_SYMBOL(local_flush_tlb_all);
 
 /* All entries common to a mm share an asid.  To effectively flush
    these entries, we just bump the asid. */
@@ -285,7 +287,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 
        ENTER_CRITICAL(flags);
 
-       pid = read_c0_entryhi() & ASID_MASK;
+       pid = ASID_MASK(read_c0_entryhi());
        address &= (PAGE_MASK << 1);
        write_c0_entryhi(address | pid);
        pgdp = pgd_offset(vma->vm_mm, address);
index 91c2499f806a25809259a0b9682667ce2d7f31d5..122f9207f49e7f58871cda1bb681f73370fd1286 100644 (file)
@@ -195,7 +195,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
        if (current->active_mm != vma->vm_mm)
                return;
 
-       pid = read_c0_entryhi() & ASID_MASK;
+       pid = ASID_MASK(read_c0_entryhi());
 
        local_irq_save(flags);
        address &= PAGE_MASK;
index 820e6612d744e199f379419103ddc507bd398696..4d46d37875765a3d3024bdee0d043fae9b7a23f7 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/init.h>
 #include <linux/cache.h>
 
+#include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 #include <asm/pgtable.h>
 #include <asm/war.h>
@@ -305,6 +306,78 @@ static struct uasm_reloc relocs[128] __cpuinitdata;
 static int check_for_high_segbits __cpuinitdata;
 #endif
 
+static void __cpuinit insn_fixup(unsigned int **start, unsigned int **stop,
+                                       unsigned int i_const)
+{
+       unsigned int **p;
+
+       for (p = start; p < stop; p++) {
+#ifndef CONFIG_CPU_MICROMIPS
+               unsigned int *ip;
+
+               ip = *p;
+               *ip = (*ip & 0xffff0000) | i_const;
+#else
+               unsigned short *ip;
+
+               ip = ((unsigned short *)((unsigned int)*p - 1));
+               if ((*ip & 0xf000) == 0x4000) {
+                       *ip &= 0xfff1;
+                       *ip |= (i_const << 1);
+               } else if ((*ip & 0xf000) == 0x6000) {
+                       *ip &= 0xfff1;
+                       *ip |= ((i_const >> 2) << 1);
+               } else {
+                       ip++;
+                       *ip = i_const;
+               }
+#endif
+               local_flush_icache_range((unsigned long)ip,
+                                        (unsigned long)ip + sizeof(*ip));
+       }
+}
+
+#define asid_insn_fixup(section, const)                                        \
+do {                                                                   \
+       extern unsigned int *__start_ ## section;                       \
+       extern unsigned int *__stop_ ## section;                        \
+       insn_fixup(&__start_ ## section, &__stop_ ## section, const);   \
+} while(0)
+
+/*
+ * Caller is assumed to flush the caches before the first context switch.
+ */
+static void __cpuinit setup_asid(unsigned int inc, unsigned int mask,
+                                unsigned int version_mask,
+                                unsigned int first_version)
+{
+       extern asmlinkage void handle_ri_rdhwr_vivt(void);
+       unsigned long *vivt_exc;
+
+#ifdef CONFIG_CPU_MICROMIPS
+       /*
+        * Worst case optimised microMIPS addiu instructions support
+        * only a 3-bit immediate value.
+        */
+       if(inc > 7)
+               panic("Invalid ASID increment value!");
+#endif
+       asid_insn_fixup(__asid_inc, inc);
+       asid_insn_fixup(__asid_mask, mask);
+       asid_insn_fixup(__asid_version_mask, version_mask);
+       asid_insn_fixup(__asid_first_version, first_version);
+
+       /* Patch up the 'handle_ri_rdhwr_vivt' handler. */
+       vivt_exc = (unsigned long *) &handle_ri_rdhwr_vivt;
+#ifdef CONFIG_CPU_MICROMIPS
+       vivt_exc = (unsigned long *)((unsigned long) vivt_exc - 1);
+#endif
+       vivt_exc++;
+       *vivt_exc = (*vivt_exc & ~mask) | mask;
+
+       current_cpu_data.asid_cache = first_version;
+}
+
 static int check_for_high_segbits __cpuinitdata;
 
 static unsigned int kscratch_used_mask __cpuinitdata;
@@ -1458,17 +1531,17 @@ u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
 u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
 u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;
 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
-u32 tlbmiss_handler_setup_pgd[16] __cacheline_aligned;
+u32 tlbmiss_handler_setup_pgd_array[16] __cacheline_aligned;
 
 static void __cpuinit build_r4000_setup_pgd(void)
 {
        const int a0 = 4;
        const int a1 = 5;
-       u32 *p = tlbmiss_handler_setup_pgd;
+       u32 *p = tlbmiss_handler_setup_pgd_array;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
 
-       memset(tlbmiss_handler_setup_pgd, 0, sizeof(tlbmiss_handler_setup_pgd));
+       memset(tlbmiss_handler_setup_pgd_array, 0, sizeof(tlbmiss_handler_setup_pgd_array));
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));
 
@@ -1496,15 +1569,15 @@ static void __cpuinit build_r4000_setup_pgd(void)
                uasm_i_jr(&p, 31);
                UASM_i_MTC0(&p, a0, 31, pgd_reg);
        }
-       if (p - tlbmiss_handler_setup_pgd > ARRAY_SIZE(tlbmiss_handler_setup_pgd))
-               panic("tlbmiss_handler_setup_pgd space exceeded");
+       if (p - tlbmiss_handler_setup_pgd_array > ARRAY_SIZE(tlbmiss_handler_setup_pgd_array))
+               panic("tlbmiss_handler_setup_pgd_array space exceeded");
        uasm_resolve_relocs(relocs, labels);
-       pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n",
-                (unsigned int)(p - tlbmiss_handler_setup_pgd));
+       pr_debug("Wrote tlbmiss_handler_setup_pgd_array (%u instructions).\n",
+                (unsigned int)(p - tlbmiss_handler_setup_pgd_array));
 
        dump_handler("tlbmiss_handler",
-                    tlbmiss_handler_setup_pgd,
-                    ARRAY_SIZE(tlbmiss_handler_setup_pgd));
+                    tlbmiss_handler_setup_pgd_array,
+                    ARRAY_SIZE(tlbmiss_handler_setup_pgd_array));
 }
 #endif
 
@@ -2030,6 +2103,13 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 
        uasm_l_nopage_tlbl(&l, p);
        build_restore_work_registers(&p);
+#ifdef CONFIG_CPU_MICROMIPS
+       if ((unsigned long)tlb_do_page_fault_0 & 1) {
+               uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_0));
+               uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_0));
+               uasm_i_jr(&p, K0);
+       } else
+#endif
        uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
        uasm_i_nop(&p);
 
@@ -2077,6 +2157,13 @@ static void __cpuinit build_r4000_tlb_store_handler(void)
 
        uasm_l_nopage_tlbs(&l, p);
        build_restore_work_registers(&p);
+#ifdef CONFIG_CPU_MICROMIPS
+       if ((unsigned long)tlb_do_page_fault_1 & 1) {
+               uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
+               uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
+               uasm_i_jr(&p, K0);
+       } else
+#endif
        uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
        uasm_i_nop(&p);
 
@@ -2125,6 +2212,13 @@ static void __cpuinit build_r4000_tlb_modify_handler(void)
 
        uasm_l_nopage_tlbm(&l, p);
        build_restore_work_registers(&p);
+#ifdef CONFIG_CPU_MICROMIPS
+       if ((unsigned long)tlb_do_page_fault_1 & 1) {
+               uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
+               uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
+               uasm_i_jr(&p, K0);
+       } else
+#endif
        uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
        uasm_i_nop(&p);
 
@@ -2162,8 +2256,12 @@ void __cpuinit build_tlb_refill_handler(void)
        case CPU_TX3922:
        case CPU_TX3927:
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
-               build_r3000_tlb_refill_handler();
+               setup_asid(0x40, 0xfc0, 0xf000, ASID_FIRST_VERSION_R3000);
+               if (cpu_has_local_ebase)
+                       build_r3000_tlb_refill_handler();
                if (!run_once) {
+                       if (!cpu_has_local_ebase)
+                               build_r3000_tlb_refill_handler();
                        build_r3000_tlb_load_handler();
                        build_r3000_tlb_store_handler();
                        build_r3000_tlb_modify_handler();
@@ -2184,6 +2282,11 @@ void __cpuinit build_tlb_refill_handler(void)
                break;
 
        default:
+#ifndef CONFIG_MIPS_MT_SMTC
+               setup_asid(0x1, 0xff, 0xff00, ASID_FIRST_VERSION_R4000);
+#else
+               setup_asid(0x1, smtc_asid_mask, 0xff00, ASID_FIRST_VERSION_R4000);
+#endif
                if (!run_once) {
                        scratch_reg = allocate_kscratch();
 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
@@ -2192,9 +2295,12 @@ void __cpuinit build_tlb_refill_handler(void)
                        build_r4000_tlb_load_handler();
                        build_r4000_tlb_store_handler();
                        build_r4000_tlb_modify_handler();
+                       if (!cpu_has_local_ebase)
+                               build_r4000_tlb_refill_handler();
                        run_once++;
                }
-               build_r4000_tlb_refill_handler();
+               if (cpu_has_local_ebase)
+                       build_r4000_tlb_refill_handler();
        }
 }
 
@@ -2207,7 +2313,7 @@ void __cpuinit flush_tlb_handlers(void)
        local_flush_icache_range((unsigned long)handle_tlbm,
                           (unsigned long)handle_tlbm + sizeof(handle_tlbm));
 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
-       local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd,
-                          (unsigned long)tlbmiss_handler_setup_pgd + sizeof(handle_tlbm));
+       local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd_array,
+                          (unsigned long)tlbmiss_handler_setup_pgd_array + sizeof(handle_tlbm));
 #endif
 }
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
new file mode 100644 (file)
index 0000000..162ee6d
--- /dev/null
@@ -0,0 +1,221 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * A small micro-assembler. It is intentionally kept simple, does only
+ * support a subset of instructions, and does not try to hide pipeline
+ * effects like branch delay slots.
+ *
+ * Copyright (C) 2004, 2005, 2006, 2008         Thiemo Seufer
+ * Copyright (C) 2005, 2007  Maciej W. Rozycki
+ * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2012, 2013   MIPS Technologies, Inc.  All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+
+#include <asm/inst.h>
+#include <asm/elf.h>
+#include <asm/bugs.h>
+#define UASM_ISA       _UASM_ISA_MICROMIPS
+#include <asm/uasm.h>
+
+#define RS_MASK                0x1f
+#define RS_SH          16
+#define RT_MASK                0x1f
+#define RT_SH          21
+#define SCIMM_MASK     0x3ff
+#define SCIMM_SH       16
+
+/* This macro sets the non-variable bits of an instruction. */
+#define M(a, b, c, d, e, f)                                    \
+       ((a) << OP_SH                                           \
+        | (b) << RT_SH                                         \
+        | (c) << RS_SH                                         \
+        | (d) << RD_SH                                         \
+        | (e) << RE_SH                                         \
+        | (f) << FUNC_SH)
+
+/* Define these when we are not the ISA the kernel is being compiled with. */
+#ifndef CONFIG_CPU_MICROMIPS
+#define MM_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off)
+#define MM_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off)
+#define MM_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off)
+#define MM_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off)
+#endif
+
+#include "uasm.c"
+
+static struct insn insn_table_MM[] __uasminitdata = {
+       { insn_addu, M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD },
+       { insn_addiu, M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
+       { insn_and, M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD },
+       { insn_andi, M(mm_andi32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
+       { insn_beq, M(mm_beq32_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+       { insn_beql, 0, 0 },
+       { insn_bgez, M(mm_pool32i_op, mm_bgez_op, 0, 0, 0, 0), RS | BIMM },
+       { insn_bgezl, 0, 0 },
+       { insn_bltz, M(mm_pool32i_op, mm_bltz_op, 0, 0, 0, 0), RS | BIMM },
+       { insn_bltzl, 0, 0 },
+       { insn_bne, M(mm_bne32_op, 0, 0, 0, 0, 0), RT | RS | BIMM },
+       { insn_cache, M(mm_pool32b_op, 0, 0, mm_cache_func, 0, 0), RT | RS | SIMM },
+       { insn_daddu, 0, 0 },
+       { insn_daddiu, 0, 0 },
+       { insn_dmfc0, 0, 0 },
+       { insn_dmtc0, 0, 0 },
+       { insn_dsll, 0, 0 },
+       { insn_dsll32, 0, 0 },
+       { insn_dsra, 0, 0 },
+       { insn_dsrl, 0, 0 },
+       { insn_dsrl32, 0, 0 },
+       { insn_drotr, 0, 0 },
+       { insn_drotr32, 0, 0 },
+       { insn_dsubu, 0, 0 },
+       { insn_eret, M(mm_pool32a_op, 0, 0, 0, mm_eret_op, mm_pool32axf_op), 0 },
+       { insn_ins, M(mm_pool32a_op, 0, 0, 0, 0, mm_ins_op), RT | RS | RD | RE },
+       { insn_ext, M(mm_pool32a_op, 0, 0, 0, 0, mm_ext_op), RT | RS | RD | RE },
+       { insn_j, M(mm_j32_op, 0, 0, 0, 0, 0), JIMM },
+       { insn_jal, M(mm_jal32_op, 0, 0, 0, 0, 0), JIMM },
+       { insn_jr, M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS },
+       { insn_ld, 0, 0 },
+       { insn_ll, M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM },
+       { insn_lld, 0, 0 },
+       { insn_lui, M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM },
+       { insn_lw, M(mm_lw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
+       { insn_mfc0, M(mm_pool32a_op, 0, 0, 0, mm_mfc0_op, mm_pool32axf_op), RT | RS | RD },
+       { insn_mtc0, M(mm_pool32a_op, 0, 0, 0, mm_mtc0_op, mm_pool32axf_op), RT | RS | RD },
+       { insn_or, M(mm_pool32a_op, 0, 0, 0, 0, mm_or32_op), RT | RS | RD },
+       { insn_ori, M(mm_ori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
+       { insn_pref, M(mm_pool32c_op, 0, 0, (mm_pref_func << 1), 0, 0), RT | RS | SIMM },
+       { insn_rfe, 0, 0 },
+       { insn_sc, M(mm_pool32c_op, 0, 0, (mm_sc_func << 1), 0, 0), RT | RS | SIMM },
+       { insn_scd, 0, 0 },
+       { insn_sd, 0, 0 },
+       { insn_sll, M(mm_pool32a_op, 0, 0, 0, 0, mm_sll32_op), RT | RS | RD },
+       { insn_sra, M(mm_pool32a_op, 0, 0, 0, 0, mm_sra_op), RT | RS | RD },
+       { insn_srl, M(mm_pool32a_op, 0, 0, 0, 0, mm_srl32_op), RT | RS | RD },
+       { insn_rotr, M(mm_pool32a_op, 0, 0, 0, 0, mm_rotr_op), RT | RS | RD },
+       { insn_subu, M(mm_pool32a_op, 0, 0, 0, 0, mm_subu32_op), RT | RS | RD },
+       { insn_sw, M(mm_sw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
+       { insn_tlbp, M(mm_pool32a_op, 0, 0, 0, mm_tlbp_op, mm_pool32axf_op), 0 },
+       { insn_tlbr, M(mm_pool32a_op, 0, 0, 0, mm_tlbr_op, mm_pool32axf_op), 0 },
+       { insn_tlbwi, M(mm_pool32a_op, 0, 0, 0, mm_tlbwi_op, mm_pool32axf_op), 0 },
+       { insn_tlbwr, M(mm_pool32a_op, 0, 0, 0, mm_tlbwr_op, mm_pool32axf_op), 0 },
+       { insn_xor, M(mm_pool32a_op, 0, 0, 0, 0, mm_xor32_op), RT | RS | RD },
+       { insn_xori, M(mm_xori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
+       { insn_dins, 0, 0 },
+       { insn_dinsm, 0, 0 },
+       { insn_syscall, M(mm_pool32a_op, 0, 0, 0, mm_syscall_op, mm_pool32axf_op), SCIMM},
+       { insn_bbit0, 0, 0 },
+       { insn_bbit1, 0, 0 },
+       { insn_lwx, 0, 0 },
+       { insn_ldx, 0, 0 },
+       { insn_invalid, 0, 0 }
+};
+
+#undef M
+
+static inline __uasminit u32 build_bimm(s32 arg)
+{
+       WARN(arg > 0xffff || arg < -0x10000,
+            KERN_WARNING "Micro-assembler field overflow\n");
+
+       WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");
+
+       return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 1) & 0x7fff);
+}
+
+static inline __uasminit u32 build_jimm(u32 arg)
+{
+
+       WARN(arg & ~((JIMM_MASK << 2) | 1),
+            KERN_WARNING "Micro-assembler field overflow\n");
+
+       return (arg >> 1) & JIMM_MASK;
+}
+
+/*
+ * The order of opcode arguments is implicitly left to right,
+ * starting with RS and ending with FUNC or IMM.
+ */
+static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
+{
+       struct insn *ip = NULL;
+       unsigned int i;
+       va_list ap;
+       u32 op;
+
+       for (i = 0; insn_table_MM[i].opcode != insn_invalid; i++)
+               if (insn_table_MM[i].opcode == opc) {
+                       ip = &insn_table_MM[i];
+                       break;
+               }
+
+       if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
+               panic("Unsupported Micro-assembler instruction %d", opc);
+
+       op = ip->match;
+       va_start(ap, opc);
+       if (ip->fields & RS) {
+               if (opc == insn_mfc0 || opc == insn_mtc0)
+                       op |= build_rt(va_arg(ap, u32));
+               else
+                       op |= build_rs(va_arg(ap, u32));
+       }
+       if (ip->fields & RT) {
+               if (opc == insn_mfc0 || opc == insn_mtc0)
+                       op |= build_rs(va_arg(ap, u32));
+               else
+                       op |= build_rt(va_arg(ap, u32));
+       }
+       if (ip->fields & RD)
+               op |= build_rd(va_arg(ap, u32));
+       if (ip->fields & RE)
+               op |= build_re(va_arg(ap, u32));
+       if (ip->fields & SIMM)
+               op |= build_simm(va_arg(ap, s32));
+       if (ip->fields & UIMM)
+               op |= build_uimm(va_arg(ap, u32));
+       if (ip->fields & BIMM)
+               op |= build_bimm(va_arg(ap, s32));
+       if (ip->fields & JIMM)
+               op |= build_jimm(va_arg(ap, u32));
+       if (ip->fields & FUNC)
+               op |= build_func(va_arg(ap, u32));
+       if (ip->fields & SET)
+               op |= build_set(va_arg(ap, u32));
+       if (ip->fields & SCIMM)
+               op |= build_scimm(va_arg(ap, u32));
+       va_end(ap);
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+       **buf = ((op & 0xffff) << 16) | (op >> 16);
+#else
+       **buf = op;
+#endif
+       (*buf)++;
+}
+
+static inline void __uasminit
+__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
+{
+       long laddr = (long)lab->addr;
+       long raddr = (long)rel->addr;
+
+       switch (rel->type) {
+       case R_MIPS_PC16:
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+               *rel->addr |= (build_bimm(laddr - (raddr + 4)) << 16);
+#else
+               *rel->addr |= build_bimm(laddr - (raddr + 4));
+#endif
+               break;
+
+       default:
+               panic("Unsupported Micro-assembler relocation %d",
+                     rel->type);
+       }
+}
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
new file mode 100644 (file)
index 0000000..5fcdd8f
--- /dev/null
@@ -0,0 +1,205 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * A small micro-assembler. It is intentionally kept simple, does only
+ * support a subset of instructions, and does not try to hide pipeline
+ * effects like branch delay slots.
+ *
+ * Copyright (C) 2004, 2005, 2006, 2008         Thiemo Seufer
+ * Copyright (C) 2005, 2007  Maciej W. Rozycki
+ * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2012, 2013  MIPS Technologies, Inc.  All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+
+#include <asm/inst.h>
+#include <asm/elf.h>
+#include <asm/bugs.h>
+#define UASM_ISA       _UASM_ISA_CLASSIC
+#include <asm/uasm.h>
+
+#define RS_MASK                0x1f
+#define RS_SH          21
+#define RT_MASK                0x1f
+#define RT_SH          16
+#define SCIMM_MASK     0xfffff
+#define SCIMM_SH       6
+
+/* This macro sets the non-variable bits of an instruction. */
+#define M(a, b, c, d, e, f)                                    \
+       ((a) << OP_SH                                           \
+        | (b) << RS_SH                                         \
+        | (c) << RT_SH                                         \
+        | (d) << RD_SH                                         \
+        | (e) << RE_SH                                         \
+        | (f) << FUNC_SH)
+
+/* Define these when we are not the ISA the kernel is being compiled with. */
+#ifdef CONFIG_CPU_MICROMIPS
+#define CL_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off)
+#define CL_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off)
+#define CL_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off)
+#define CL_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off)
+#endif
+
+#include "uasm.c"
+
+static struct insn insn_table[] __uasminitdata = {
+       { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+       { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
+       { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
+       { insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
+       { insn_bbit0, M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+       { insn_bbit1, M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+       { insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+       { insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+       { insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM },
+       { insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM },
+       { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
+       { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
+       { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+       { insn_cache,  M(cache_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+       { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+       { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
+       { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
+       { insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
+       { insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
+       { insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},
+       { insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE },
+       { insn_drotr, M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE },
+       { insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE },
+       { insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE },
+       { insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE },
+       { insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE },
+       { insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE },
+       { insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD },
+       { insn_eret,  M(cop0_op, cop_op, 0, 0, 0, eret_op),  0 },
+       { insn_ext, M(spec3_op, 0, 0, 0, 0, ext_op), RS | RT | RD | RE },
+       { insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE },
+       { insn_j,  M(j_op, 0, 0, 0, 0, 0),  JIMM },
+       { insn_jal,  M(jal_op, 0, 0, 0, 0, 0),  JIMM },
+       { insn_j,  M(j_op, 0, 0, 0, 0, 0),  JIMM },
+       { insn_jr,  M(spec_op, 0, 0, 0, 0, jr_op),  RS },
+       { insn_ld,  M(ld_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+       { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
+       { insn_lld,  M(lld_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+       { insn_ll,  M(ll_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+       { insn_lui,  M(lui_op, 0, 0, 0, 0, 0),  RT | SIMM },
+       { insn_lw,  M(lw_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+       { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
+       { insn_mfc0,  M(cop0_op, mfc_op, 0, 0, 0, 0),  RT | RD | SET},
+       { insn_mtc0,  M(cop0_op, mtc_op, 0, 0, 0, 0),  RT | RD | SET},
+       { insn_ori,  M(ori_op, 0, 0, 0, 0, 0),  RS | RT | UIMM },
+       { insn_or,  M(spec_op, 0, 0, 0, 0, or_op),  RS | RT | RD },
+       { insn_pref,  M(pref_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+       { insn_rfe,  M(cop0_op, cop_op, 0, 0, 0, rfe_op),  0 },
+       { insn_rotr,  M(spec_op, 1, 0, 0, 0, srl_op),  RT | RD | RE },
+       { insn_scd,  M(scd_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+       { insn_sc,  M(sc_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+       { insn_sd,  M(sd_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+       { insn_sll,  M(spec_op, 0, 0, 0, 0, sll_op),  RT | RD | RE },
+       { insn_sra,  M(spec_op, 0, 0, 0, 0, sra_op),  RT | RD | RE },
+       { insn_srl,  M(spec_op, 0, 0, 0, 0, srl_op),  RT | RD | RE },
+       { insn_subu,  M(spec_op, 0, 0, 0, 0, subu_op),  RS | RT | RD },
+       { insn_sw,  M(sw_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+       { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
+       { insn_tlbp,  M(cop0_op, cop_op, 0, 0, 0, tlbp_op),  0 },
+       { insn_tlbr,  M(cop0_op, cop_op, 0, 0, 0, tlbr_op),  0 },
+       { insn_tlbwi,  M(cop0_op, cop_op, 0, 0, 0, tlbwi_op),  0 },
+       { insn_tlbwr,  M(cop0_op, cop_op, 0, 0, 0, tlbwr_op),  0 },
+       { insn_xori,  M(xori_op, 0, 0, 0, 0, 0),  RS | RT | UIMM },
+       { insn_xor,  M(spec_op, 0, 0, 0, 0, xor_op),  RS | RT | RD },
+       { insn_invalid, 0, 0 }
+};
+
+#undef M
+
+static inline __uasminit u32 build_bimm(s32 arg)
+{
+       WARN(arg > 0x1ffff || arg < -0x20000,
+            KERN_WARNING "Micro-assembler field overflow\n");
+
+       WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");
+
+       return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
+}
+
+static inline __uasminit u32 build_jimm(u32 arg)
+{
+       WARN(arg & ~(JIMM_MASK << 2),
+            KERN_WARNING "Micro-assembler field overflow\n");
+
+       return (arg >> 2) & JIMM_MASK;
+}
+
+/*
+ * The order of opcode arguments is implicitly left to right,
+ * starting with RS and ending with FUNC or IMM.
+ */
+static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
+{
+       struct insn *ip = NULL;
+       unsigned int i;
+       va_list ap;
+       u32 op;
+
+       for (i = 0; insn_table[i].opcode != insn_invalid; i++)
+               if (insn_table[i].opcode == opc) {
+                       ip = &insn_table[i];
+                       break;
+               }
+
+       if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
+               panic("Unsupported Micro-assembler instruction %d", opc);
+
+       op = ip->match;
+       va_start(ap, opc);
+       if (ip->fields & RS)
+               op |= build_rs(va_arg(ap, u32));
+       if (ip->fields & RT)
+               op |= build_rt(va_arg(ap, u32));
+       if (ip->fields & RD)
+               op |= build_rd(va_arg(ap, u32));
+       if (ip->fields & RE)
+               op |= build_re(va_arg(ap, u32));
+       if (ip->fields & SIMM)
+               op |= build_simm(va_arg(ap, s32));
+       if (ip->fields & UIMM)
+               op |= build_uimm(va_arg(ap, u32));
+       if (ip->fields & BIMM)
+               op |= build_bimm(va_arg(ap, s32));
+       if (ip->fields & JIMM)
+               op |= build_jimm(va_arg(ap, u32));
+       if (ip->fields & FUNC)
+               op |= build_func(va_arg(ap, u32));
+       if (ip->fields & SET)
+               op |= build_set(va_arg(ap, u32));
+       if (ip->fields & SCIMM)
+               op |= build_scimm(va_arg(ap, u32));
+       va_end(ap);
+
+       **buf = op;
+       (*buf)++;
+}
+
+static inline void __uasminit
+__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
+{
+       long laddr = (long)lab->addr;
+       long raddr = (long)rel->addr;
+
+       switch (rel->type) {
+       case R_MIPS_PC16:
+               *rel->addr |= build_bimm(laddr - (raddr + 4));
+               break;
+
+       default:
+               panic("Unsupported Micro-assembler relocation %d",
+                     rel->type);
+       }
+}
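build_insn() above is table-driven: it looks up the opcode's match word and field mask, then ORs each operand into its field, with uasm-micromips.c swapping the RS/RT positions relative to uasm-mips.c. A stand-alone sketch of the classic-MIPS case for addiu (encode_addiu() is illustrative; the value 9 is the architectural ADDIU major opcode):

#include <stdint.h>
#include <stdio.h>

/* Classic-MIPS field positions, matching RS_SH/RT_SH in uasm-mips.c. */
#define OP_SH   26
#define RS_SH   21
#define RT_SH   16

/* Illustrative stand-in for build_insn(buf, insn_addiu, rt, rs, imm):
 * start from the table's match word (just the major opcode for addiu)
 * and OR the operand fields into place. */
static uint32_t encode_addiu(unsigned int rt, unsigned int rs, int16_t imm)
{
        uint32_t op = 9u << OP_SH;              /* ADDIU major opcode */

        op |= (rs & 0x1f) << RS_SH;
        op |= (rt & 0x1f) << RT_SH;
        op |= (uint16_t)imm;
        return op;
}

int main(void)
{
        /* addiu $k0, $k0, 4 -> 0x275a0004 */
        printf("0x%08x\n", encode_addiu(26, 26, 4));
        return 0;
}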
index 942ff6c2eba27ac256d812c64a1b5752b164f712..7eb5e4355d25c467fd1241ffda14715dfc69c1c7 100644 (file)
  * Copyright (C) 2004, 2005, 2006, 2008         Thiemo Seufer
  * Copyright (C) 2005, 2007  Maciej W. Rozycki
  * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2012, 2013  MIPS Technologies, Inc.  All rights reserved.
  */
 
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-
-#include <asm/inst.h>
-#include <asm/elf.h>
-#include <asm/bugs.h>
-#include <asm/uasm.h>
-
 enum fields {
        RS = 0x001,
        RT = 0x002,
@@ -37,10 +29,6 @@ enum fields {
 
 #define OP_MASK                0x3f
 #define OP_SH          26
-#define RS_MASK                0x1f
-#define RS_SH          21
-#define RT_MASK                0x1f
-#define RT_SH          16
 #define RD_MASK                0x1f
 #define RD_SH          11
 #define RE_MASK                0x1f
@@ -53,8 +41,6 @@ enum fields {
 #define FUNC_SH                0
 #define SET_MASK       0x7
 #define SET_SH         0
-#define SCIMM_MASK     0xfffff
-#define SCIMM_SH       6
 
 enum opcode {
        insn_invalid,
@@ -77,85 +63,6 @@ struct insn {
        enum fields fields;
 };
 
-/* This macro sets the non-variable bits of an instruction. */
-#define M(a, b, c, d, e, f)                                    \
-       ((a) << OP_SH                                           \
-        | (b) << RS_SH                                         \
-        | (c) << RT_SH                                         \
-        | (d) << RD_SH                                         \
-        | (e) << RE_SH                                         \
-        | (f) << FUNC_SH)
-
-static struct insn insn_table[] __uasminitdata = {
-       { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
-       { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
-       { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
-       { insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
-       { insn_bbit0, M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
-       { insn_bbit1, M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
-       { insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
-       { insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
-       { insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM },
-       { insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM },
-       { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
-       { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
-       { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
-       { insn_cache,  M(cache_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-       { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
-       { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
-       { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
-       { insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
-       { insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
-       { insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},
-       { insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE },
-       { insn_drotr, M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE },
-       { insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE },
-       { insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE },
-       { insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE },
-       { insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE },
-       { insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE },
-       { insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD },
-       { insn_eret,  M(cop0_op, cop_op, 0, 0, 0, eret_op),  0 },
-       { insn_ext, M(spec3_op, 0, 0, 0, 0, ext_op), RS | RT | RD | RE },
-       { insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE },
-       { insn_j,  M(j_op, 0, 0, 0, 0, 0),  JIMM },
-       { insn_jal,  M(jal_op, 0, 0, 0, 0, 0),  JIMM },
-       { insn_j,  M(j_op, 0, 0, 0, 0, 0),  JIMM },
-       { insn_jr,  M(spec_op, 0, 0, 0, 0, jr_op),  RS },
-       { insn_ld,  M(ld_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-       { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
-       { insn_lld,  M(lld_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-       { insn_ll,  M(ll_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-       { insn_lui,  M(lui_op, 0, 0, 0, 0, 0),  RT | SIMM },
-       { insn_lw,  M(lw_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-       { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
-       { insn_mfc0,  M(cop0_op, mfc_op, 0, 0, 0, 0),  RT | RD | SET},
-       { insn_mtc0,  M(cop0_op, mtc_op, 0, 0, 0, 0),  RT | RD | SET},
-       { insn_ori,  M(ori_op, 0, 0, 0, 0, 0),  RS | RT | UIMM },
-       { insn_or,  M(spec_op, 0, 0, 0, 0, or_op),  RS | RT | RD },
-       { insn_pref,  M(pref_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-       { insn_rfe,  M(cop0_op, cop_op, 0, 0, 0, rfe_op),  0 },
-       { insn_rotr,  M(spec_op, 1, 0, 0, 0, srl_op),  RT | RD | RE },
-       { insn_scd,  M(scd_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-       { insn_sc,  M(sc_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-       { insn_sd,  M(sd_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-       { insn_sll,  M(spec_op, 0, 0, 0, 0, sll_op),  RT | RD | RE },
-       { insn_sra,  M(spec_op, 0, 0, 0, 0, sra_op),  RT | RD | RE },
-       { insn_srl,  M(spec_op, 0, 0, 0, 0, srl_op),  RT | RD | RE },
-       { insn_subu,  M(spec_op, 0, 0, 0, 0, subu_op),  RS | RT | RD },
-       { insn_sw,  M(sw_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
-       { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
-       { insn_tlbp,  M(cop0_op, cop_op, 0, 0, 0, tlbp_op),  0 },
-       { insn_tlbr,  M(cop0_op, cop_op, 0, 0, 0, tlbr_op),  0 },
-       { insn_tlbwi,  M(cop0_op, cop_op, 0, 0, 0, tlbwi_op),  0 },
-       { insn_tlbwr,  M(cop0_op, cop_op, 0, 0, 0, tlbwr_op),  0 },
-       { insn_xori,  M(xori_op, 0, 0, 0, 0, 0),  RS | RT | UIMM },
-       { insn_xor,  M(spec_op, 0, 0, 0, 0, xor_op),  RS | RT | RD },
-       { insn_invalid, 0, 0 }
-};
-
-#undef M
-
 static inline __uasminit u32 build_rs(u32 arg)
 {
        WARN(arg & ~RS_MASK, KERN_WARNING "Micro-assembler field overflow\n");
@@ -199,24 +106,6 @@ static inline __uasminit u32 build_uimm(u32 arg)
        return arg & IMM_MASK;
 }
 
-static inline __uasminit u32 build_bimm(s32 arg)
-{
-       WARN(arg > 0x1ffff || arg < -0x20000,
-            KERN_WARNING "Micro-assembler field overflow\n");
-
-       WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");
-
-       return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
-}
-
-static inline __uasminit u32 build_jimm(u32 arg)
-{
-       WARN(arg & ~(JIMM_MASK << 2),
-            KERN_WARNING "Micro-assembler field overflow\n");
-
-       return (arg >> 2) & JIMM_MASK;
-}
-
 static inline __uasminit u32 build_scimm(u32 arg)
 {
        WARN(arg & ~SCIMM_MASK,
@@ -239,55 +128,7 @@ static inline __uasminit u32 build_set(u32 arg)
        return arg & SET_MASK;
 }
 
-/*
- * The order of opcode arguments is implicitly left to right,
- * starting with RS and ending with FUNC or IMM.
- */
-static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
-{
-       struct insn *ip = NULL;
-       unsigned int i;
-       va_list ap;
-       u32 op;
-
-       for (i = 0; insn_table[i].opcode != insn_invalid; i++)
-               if (insn_table[i].opcode == opc) {
-                       ip = &insn_table[i];
-                       break;
-               }
-
-       if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
-               panic("Unsupported Micro-assembler instruction %d", opc);
-
-       op = ip->match;
-       va_start(ap, opc);
-       if (ip->fields & RS)
-               op |= build_rs(va_arg(ap, u32));
-       if (ip->fields & RT)
-               op |= build_rt(va_arg(ap, u32));
-       if (ip->fields & RD)
-               op |= build_rd(va_arg(ap, u32));
-       if (ip->fields & RE)
-               op |= build_re(va_arg(ap, u32));
-       if (ip->fields & SIMM)
-               op |= build_simm(va_arg(ap, s32));
-       if (ip->fields & UIMM)
-               op |= build_uimm(va_arg(ap, u32));
-       if (ip->fields & BIMM)
-               op |= build_bimm(va_arg(ap, s32));
-       if (ip->fields & JIMM)
-               op |= build_jimm(va_arg(ap, u32));
-       if (ip->fields & FUNC)
-               op |= build_func(va_arg(ap, u32));
-       if (ip->fields & SET)
-               op |= build_set(va_arg(ap, u32));
-       if (ip->fields & SCIMM)
-               op |= build_scimm(va_arg(ap, u32));
-       va_end(ap);
-
-       **buf = op;
-       (*buf)++;
-}
+static void __uasminit build_insn(u32 **buf, enum opcode opc, ...);
 
 #define I_u1u2u3(op)                                   \
 Ip_u1u2u3(op)                                          \
@@ -445,7 +286,7 @@ I_u3u1u2(_ldx)
 
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
 #include <asm/octeon/octeon.h>
-void __uasminit uasm_i_pref(u32 **buf, unsigned int a, signed int b,
+void __uasminit ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b,
                            unsigned int c)
 {
        if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5)
@@ -457,21 +298,21 @@ void __uasminit uasm_i_pref(u32 **buf, unsigned int a, signed int b,
        else
                build_insn(buf, insn_pref, c, a, b);
 }
-UASM_EXPORT_SYMBOL(uasm_i_pref);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_i_pref));
 #else
 I_u2s3u1(_pref)
 #endif
 
 /* Handle labels. */
-void __uasminit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
+void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, int lid)
 {
        (*lab)->addr = addr;
        (*lab)->lab = lid;
        (*lab)++;
 }
-UASM_EXPORT_SYMBOL(uasm_build_label);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_build_label));
 
-int __uasminit uasm_in_compat_space_p(long addr)
+int __uasminit ISAFUNC(uasm_in_compat_space_p)(long addr)
 {
        /* Is this address in 32bit compat space? */
 #ifdef CONFIG_64BIT
@@ -480,7 +321,7 @@ int __uasminit uasm_in_compat_space_p(long addr)
        return 1;
 #endif
 }
-UASM_EXPORT_SYMBOL(uasm_in_compat_space_p);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_in_compat_space_p));
 
 static int __uasminit uasm_rel_highest(long val)
 {
@@ -500,77 +341,66 @@ static int __uasminit uasm_rel_higher(long val)
 #endif
 }
 
-int __uasminit uasm_rel_hi(long val)
+int __uasminit ISAFUNC(uasm_rel_hi)(long val)
 {
        return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
 }
-UASM_EXPORT_SYMBOL(uasm_rel_hi);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_hi));
 
-int __uasminit uasm_rel_lo(long val)
+int __uasminit ISAFUNC(uasm_rel_lo)(long val)
 {
        return ((val & 0xffff) ^ 0x8000) - 0x8000;
 }
-UASM_EXPORT_SYMBOL(uasm_rel_lo);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_lo));
 
-void __uasminit UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr)
+void __uasminit ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr)
 {
-       if (!uasm_in_compat_space_p(addr)) {
-               uasm_i_lui(buf, rs, uasm_rel_highest(addr));
+       if (!ISAFUNC(uasm_in_compat_space_p)(addr)) {
+               ISAFUNC(uasm_i_lui)(buf, rs, uasm_rel_highest(addr));
                if (uasm_rel_higher(addr))
-                       uasm_i_daddiu(buf, rs, rs, uasm_rel_higher(addr));
-               if (uasm_rel_hi(addr)) {
-                       uasm_i_dsll(buf, rs, rs, 16);
-                       uasm_i_daddiu(buf, rs, rs, uasm_rel_hi(addr));
-                       uasm_i_dsll(buf, rs, rs, 16);
+                       ISAFUNC(uasm_i_daddiu)(buf, rs, rs, uasm_rel_higher(addr));
+               if (ISAFUNC(uasm_rel_hi(addr))) {
+                       ISAFUNC(uasm_i_dsll)(buf, rs, rs, 16);
+                       ISAFUNC(uasm_i_daddiu)(buf, rs, rs,
+                                       ISAFUNC(uasm_rel_hi)(addr));
+                       ISAFUNC(uasm_i_dsll)(buf, rs, rs, 16);
                } else
-                       uasm_i_dsll32(buf, rs, rs, 0);
+                       ISAFUNC(uasm_i_dsll32)(buf, rs, rs, 0);
        } else
-               uasm_i_lui(buf, rs, uasm_rel_hi(addr));
+               ISAFUNC(uasm_i_lui)(buf, rs, ISAFUNC(uasm_rel_hi(addr)));
 }
-UASM_EXPORT_SYMBOL(UASM_i_LA_mostly);
+UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA_mostly));
 
-void __uasminit UASM_i_LA(u32 **buf, unsigned int rs, long addr)
+void __uasminit ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr)
 {
-       UASM_i_LA_mostly(buf, rs, addr);
-       if (uasm_rel_lo(addr)) {
-               if (!uasm_in_compat_space_p(addr))
-                       uasm_i_daddiu(buf, rs, rs, uasm_rel_lo(addr));
+       ISAFUNC(UASM_i_LA_mostly)(buf, rs, addr);
+       if (ISAFUNC(uasm_rel_lo(addr))) {
+               if (!ISAFUNC(uasm_in_compat_space_p)(addr))
+                       ISAFUNC(uasm_i_daddiu)(buf, rs, rs,
+                                       ISAFUNC(uasm_rel_lo(addr)));
                else
-                       uasm_i_addiu(buf, rs, rs, uasm_rel_lo(addr));
+                       ISAFUNC(uasm_i_addiu)(buf, rs, rs,
+                                       ISAFUNC(uasm_rel_lo(addr)));
        }
 }
-UASM_EXPORT_SYMBOL(UASM_i_LA);
+UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA));
 
 /* Handle relocations. */
 void __uasminit
-uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid)
+ISAFUNC(uasm_r_mips_pc16)(struct uasm_reloc **rel, u32 *addr, int lid)
 {
        (*rel)->addr = addr;
        (*rel)->type = R_MIPS_PC16;
        (*rel)->lab = lid;
        (*rel)++;
 }
-UASM_EXPORT_SYMBOL(uasm_r_mips_pc16);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_r_mips_pc16));
 
 static inline void __uasminit
-__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
-{
-       long laddr = (long)lab->addr;
-       long raddr = (long)rel->addr;
-
-       switch (rel->type) {
-       case R_MIPS_PC16:
-               *rel->addr |= build_bimm(laddr - (raddr + 4));
-               break;
-
-       default:
-               panic("Unsupported Micro-assembler relocation %d",
-                     rel->type);
-       }
-}
+__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab);
 
 void __uasminit
-uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
+ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel, struct uasm_label *lab)
 {
        struct uasm_label *l;
 
@@ -579,40 +409,40 @@ uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
                        if (rel->lab == l->lab)
                                __resolve_relocs(rel, l);
 }
-UASM_EXPORT_SYMBOL(uasm_resolve_relocs);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_resolve_relocs));
 
 void __uasminit
-uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off)
+ISAFUNC(uasm_move_relocs)(struct uasm_reloc *rel, u32 *first, u32 *end, long off)
 {
        for (; rel->lab != UASM_LABEL_INVALID; rel++)
                if (rel->addr >= first && rel->addr < end)
                        rel->addr += off;
 }
-UASM_EXPORT_SYMBOL(uasm_move_relocs);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_relocs));
 
 void __uasminit
-uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off)
+ISAFUNC(uasm_move_labels)(struct uasm_label *lab, u32 *first, u32 *end, long off)
 {
        for (; lab->lab != UASM_LABEL_INVALID; lab++)
                if (lab->addr >= first && lab->addr < end)
                        lab->addr += off;
 }
-UASM_EXPORT_SYMBOL(uasm_move_labels);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_labels));
 
 void __uasminit
-uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
+ISAFUNC(uasm_copy_handler)(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
                  u32 *end, u32 *target)
 {
        long off = (long)(target - first);
 
        memcpy(target, first, (end - first) * sizeof(u32));
 
-       uasm_move_relocs(rel, first, end, off);
-       uasm_move_labels(lab, first, end, off);
+       ISAFUNC(uasm_move_relocs(rel, first, end, off));
+       ISAFUNC(uasm_move_labels(lab, first, end, off));
 }
-UASM_EXPORT_SYMBOL(uasm_copy_handler);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_copy_handler));
 
-int __uasminit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
+int __uasminit ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr)
 {
        for (; rel->lab != UASM_LABEL_INVALID; rel++) {
                if (rel->addr == addr
@@ -623,88 +453,88 @@ int __uasminit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
 
        return 0;
 }
-UASM_EXPORT_SYMBOL(uasm_insn_has_bdelay);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_insn_has_bdelay));
 
 /* Convenience functions for labeled branches. */
 void __uasminit
-uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+ISAFUNC(uasm_il_bltz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
        uasm_r_mips_pc16(r, *p, lid);
-       uasm_i_bltz(p, reg, 0);
+       ISAFUNC(uasm_i_bltz)(p, reg, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_bltz);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bltz));
 
 void __uasminit
-uasm_il_b(u32 **p, struct uasm_reloc **r, int lid)
+ISAFUNC(uasm_il_b)(u32 **p, struct uasm_reloc **r, int lid)
 {
        uasm_r_mips_pc16(r, *p, lid);
-       uasm_i_b(p, 0);
+       ISAFUNC(uasm_i_b)(p, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_b);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_b));
 
 void __uasminit
-uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+ISAFUNC(uasm_il_beqz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
        uasm_r_mips_pc16(r, *p, lid);
-       uasm_i_beqz(p, reg, 0);
+       ISAFUNC(uasm_i_beqz)(p, reg, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_beqz);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqz));
 
 void __uasminit
-uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+ISAFUNC(uasm_il_beqzl)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
        uasm_r_mips_pc16(r, *p, lid);
-       uasm_i_beqzl(p, reg, 0);
+       ISAFUNC(uasm_i_beqzl)(p, reg, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_beqzl);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqzl));
 
 void __uasminit
-uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1,
+ISAFUNC(uasm_il_bne)(u32 **p, struct uasm_reloc **r, unsigned int reg1,
        unsigned int reg2, int lid)
 {
        uasm_r_mips_pc16(r, *p, lid);
-       uasm_i_bne(p, reg1, reg2, 0);
+       ISAFUNC(uasm_i_bne)(p, reg1, reg2, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_bne);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bne));
 
 void __uasminit
-uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+ISAFUNC(uasm_il_bnez)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
        uasm_r_mips_pc16(r, *p, lid);
-       uasm_i_bnez(p, reg, 0);
+       ISAFUNC(uasm_i_bnez)(p, reg, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_bnez);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bnez));
 
 void __uasminit
-uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+ISAFUNC(uasm_il_bgezl)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
        uasm_r_mips_pc16(r, *p, lid);
-       uasm_i_bgezl(p, reg, 0);
+       ISAFUNC(uasm_i_bgezl)(p, reg, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_bgezl);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgezl));
 
 void __uasminit
-uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
+ISAFUNC(uasm_il_bgez)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
        uasm_r_mips_pc16(r, *p, lid);
-       uasm_i_bgez(p, reg, 0);
+       ISAFUNC(uasm_i_bgez)(p, reg, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_bgez);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgez));
 
 void __uasminit
-uasm_il_bbit0(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ISAFUNC(uasm_il_bbit0)(u32 **p, struct uasm_reloc **r, unsigned int reg,
              unsigned int bit, int lid)
 {
        uasm_r_mips_pc16(r, *p, lid);
-       uasm_i_bbit0(p, reg, bit, 0);
+       ISAFUNC(uasm_i_bbit0)(p, reg, bit, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_bbit0);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit0));
 
 void __uasminit
-uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ISAFUNC(uasm_il_bbit1)(u32 **p, struct uasm_reloc **r, unsigned int reg,
              unsigned int bit, int lid)
 {
        uasm_r_mips_pc16(r, *p, lid);
-       uasm_i_bbit1(p, reg, bit, 0);
+       ISAFUNC(uasm_i_bbit1)(p, reg, bit, 0);
 }
-UASM_EXPORT_SYMBOL(uasm_il_bbit1);
+UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit1));
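
The block of renames above routes every micro-assembler entry point and its export through an ISAFUNC() name wrapper, so the classic MIPS32 and microMIPS builds of uasm can coexist as distinct symbols. A minimal sketch of such a prefixing macro follows; it is illustrative only and is not necessarily the definition the kernel carries in arch/mips/include/asm/uasm.h.

/* Hypothetical ISA-prefixing macro: microMIPS builds get a distinct
 * prefix pasted onto the function name, classic builds keep the plain
 * name. Sketch only; the prefix chosen here is an assumption. */
#ifdef CONFIG_CPU_MICROMIPS
#define ISAFUNC(x)	MM_##x		/* e.g. MM_uasm_il_bnez */
#else
#define ISAFUNC(x)	x		/* plain uasm_il_bnez */
#endif
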
index 6079ef33b5f0670e0e3f4de945bd287a821a7381..0388fc8b5613430152a0f097c7e7d36e15aebdc6 100644 (file)
@@ -5,9 +5,8 @@
 # Copyright (C) 2008 Wind River Systems, Inc.
 #   written by Ralf Baechle <ralf@linux-mips.org>
 #
-obj-y                          := malta-amon.o malta-cmdline.o \
-                                  malta-display.o malta-init.o malta-int.o \
-                                  malta-memory.o malta-platform.o \
+obj-y                          := malta-amon.o malta-display.o malta-init.o \
+                                  malta-int.o malta-memory.o malta-platform.o \
                                   malta-reset.o malta-setup.o malta-time.o
 
 obj-$(CONFIG_EARLY_PRINTK)     += malta-console.o
index 5b548b5a4fcf1f5bf116d6dd59e4c42755193c4c..2cc72c9b38e3ba508a1d9b6596da4403f67e6539 100644 (file)
@@ -3,5 +3,9 @@
 #
 platform-$(CONFIG_MIPS_MALTA)  += mti-malta/
 cflags-$(CONFIG_MIPS_MALTA)    += -I$(srctree)/arch/mips/include/asm/mach-malta
-load-$(CONFIG_MIPS_MALTA)      += 0xffffffff80100000
+ifdef CONFIG_KVM_GUEST
+    load-$(CONFIG_MIPS_MALTA)  += 0x0000000040100000
+else
+    load-$(CONFIG_MIPS_MALTA)  += 0xffffffff80100000
+endif
 all-$(CONFIG_MIPS_MALTA)       := $(COMPRESSION_FNAME).bin
diff --git a/arch/mips/mti-malta/malta-cmdline.c b/arch/mips/mti-malta/malta-cmdline.c
deleted file mode 100644 (file)
index 5576a30..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * Kernel command line creation using the prom monitor (YAMON) argc/argv.
- */
-#include <linux/init.h>
-#include <linux/string.h>
-
-#include <asm/bootinfo.h>
-
-extern int prom_argc;
-extern int *_prom_argv;
-
-/*
- * YAMON (32-bit PROM) pass arguments and environment as 32-bit pointer.
- * This macro take care of sign extension.
- */
-#define prom_argv(index) ((char *)(long)_prom_argv[(index)])
-
-char * __init prom_getcmdline(void)
-{
-       return &(arcs_cmdline[0]);
-}
-
-
-void  __init prom_init_cmdline(void)
-{
-       char *cp;
-       int actr;
-
-       actr = 1; /* Always ignore argv[0] */
-
-       cp = &(arcs_cmdline[0]);
-       while(actr < prom_argc) {
-               strcpy(cp, prom_argv(actr));
-               cp += strlen(prom_argv(actr));
-               *cp++ = ' ';
-               actr++;
-       }
-       if (cp != &(arcs_cmdline[0])) {
-               /* get rid of trailing space */
-               --cp;
-               *cp = '\0';
-       }
-}
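
The deleted file above duplicated YAMON command-line handling per board; the merge moves it into the shared firmware library (fw_init_cmdline() and friends in the new arch/mips/fw/lib/cmdline.c listed in this commit). The argv-joining idea it implemented is small enough to show as a self-contained user-space sketch, written here purely for illustration:

/* Join argv[1..argc-1] into a single space-separated command line,
 * mirroring the logic of the deleted prom_init_cmdline(). The fixed
 * buffer size is an arbitrary choice for this sketch. */
#include <stdio.h>
#include <string.h>

static char cmdline[256];

static void join_args(int argc, char **argv)
{
	char *cp = cmdline;
	int i;

	for (i = 1; i < argc; i++) {	/* argv[0] is always ignored */
		strcpy(cp, argv[i]);
		cp += strlen(argv[i]);
		*cp++ = ' ';
	}
	if (cp != cmdline)
		*--cp = '\0';		/* drop the trailing space */
}

int main(int argc, char **argv)
{
	join_args(argc, argv);
	printf("%s\n", cmdline);
	return 0;
}
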
index 9bc58a24e80a637307e13058eab6d54eacf91c88..d4f807191ecd74694075ba3ca2748ff0b80bcbb1 100644 (file)
@@ -1,28 +1,20 @@
 /*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
  *
  * Display routines for display messages in MIPS boards ascii display.
+ *
+ * Copyright (C) 1999,2000,2012  MIPS Technologies, Inc.
+ * All rights reserved.
+ * Authors: Carsten Langgaard <carstenl@mips.com>
+ *          Steven J. Hill <sjhill@mips.com>
  */
-
 #include <linux/compiler.h>
 #include <linux/timer.h>
-#include <asm/io.h>
+#include <linux/io.h>
+
 #include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/prom.h>
 
 extern const char display_string[];
 static unsigned int display_count;
@@ -36,11 +28,11 @@ void mips_display_message(const char *str)
        if (unlikely(display == NULL))
                display = ioremap(ASCII_DISPLAY_POS_BASE, 16*sizeof(int));
 
-       for (i = 0; i <= 14; i=i+2) {
-                if (*str)
-                        __raw_writel(*str++, display + i);
-                else
-                        __raw_writel(' ', display + i);
+       for (i = 0; i <= 14; i += 2) {
+               if (*str)
+                       __raw_writel(*str++, display + i);
+               else
+                       __raw_writel(' ', display + i);
        }
 }
 
index c2cbce9e435e8ffc74d25a9cfff61acbc38dc1b4..ff8caffd3266ea0a1deaaa1ee1093eac50b600b9 100644 (file)
@@ -1,54 +1,28 @@
 /*
- * Copyright (C) 1999, 2000, 2004, 2005         MIPS Technologies, Inc.
- *     All rights reserved.
- *     Authors: Carsten Langgaard <carstenl@mips.com>
- *              Maciej W. Rozycki <macro@mips.com>
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
  *
  * PROM library initialisation code.
+ *
+ * Copyright (C) 1999,2000,2004,2005,2012  MIPS Technologies, Inc.
+ * All rights reserved.
+ * Authors: Carsten Langgaard <carstenl@mips.com>
+ *         Maciej W. Rozycki <macro@mips.com>
+ *          Steven J. Hill <sjhill@mips.com>
  */
 #include <linux/init.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
 
-#include <asm/bootinfo.h>
-#include <asm/gt64120.h>
-#include <asm/io.h>
 #include <asm/cacheflush.h>
 #include <asm/smp-ops.h>
 #include <asm/traps.h>
-
+#include <asm/fw/fw.h>
 #include <asm/gcmpregs.h>
-#include <asm/mips-boards/prom.h>
 #include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/bonito64.h>
-#include <asm/mips-boards/msc01_pci.h>
-
 #include <asm/mips-boards/malta.h>
 
-int prom_argc;
-int *_prom_argv, *_prom_envp;
-
-/*
- * YAMON (32-bit PROM) pass arguments and environment as 32-bit pointer.
- * This macro take care of sign extension, if running in 64-bit mode.
- */
-#define prom_envp(index) ((char *)(long)_prom_envp[(index)])
-
-int init_debug;
-
 static int mips_revision_corid;
 int mips_revision_sconid;
 
@@ -62,74 +36,6 @@ unsigned long _pcictrl_gt64120;
 /* MIPS System controller register base */
 unsigned long _pcictrl_msc;
 
-char *prom_getenv(char *envname)
-{
-       /*
-        * Return a pointer to the given environment variable.
-        * In 64-bit mode: we're using 64-bit pointers, but all pointers
-        * in the PROM structures are only 32-bit, so we need some
-        * workarounds, if we are running in 64-bit mode.
-        */
-       int i, index=0;
-
-       i = strlen(envname);
-
-       while (prom_envp(index)) {
-               if(strncmp(envname, prom_envp(index), i) == 0) {
-                       return(prom_envp(index+1));
-               }
-               index += 2;
-       }
-
-       return NULL;
-}
-
-static inline unsigned char str2hexnum(unsigned char c)
-{
-       if (c >= '0' && c <= '9')
-               return c - '0';
-       if (c >= 'a' && c <= 'f')
-               return c - 'a' + 10;
-       return 0; /* foo */
-}
-
-static inline void str2eaddr(unsigned char *ea, unsigned char *str)
-{
-       int i;
-
-       for (i = 0; i < 6; i++) {
-               unsigned char num;
-
-               if((*str == '.') || (*str == ':'))
-                       str++;
-               num = str2hexnum(*str++) << 4;
-               num |= (str2hexnum(*str++));
-               ea[i] = num;
-       }
-}
-
-int get_ethernet_addr(char *ethernet_addr)
-{
-       char *ethaddr_str;
-
-       ethaddr_str = prom_getenv("ethaddr");
-       if (!ethaddr_str) {
-               printk("ethaddr not set in boot prom\n");
-               return -1;
-       }
-       str2eaddr(ethernet_addr, ethaddr_str);
-
-       if (init_debug > 1) {
-               int i;
-               printk("get_ethernet_addr: ");
-               for (i=0; i<5; i++)
-                       printk("%02x:", (unsigned char)*(ethernet_addr+i));
-               printk("%02x\n", *(ethernet_addr+i));
-       }
-
-       return 0;
-}
-
 #ifdef CONFIG_SERIAL_8250_CONSOLE
 static void __init console_config(void)
 {
@@ -138,17 +44,23 @@ static void __init console_config(void)
        char parity = '\0', bits = '\0', flow = '\0';
        char *s;
 
-       if ((strstr(prom_getcmdline(), "console=")) == NULL) {
-               s = prom_getenv("modetty0");
+       if ((strstr(fw_getcmdline(), "console=")) == NULL) {
+               s = fw_getenv("modetty0");
                if (s) {
                        while (*s >= '0' && *s <= '9')
                                baud = baud*10 + *s++ - '0';
-                       if (*s == ',') s++;
-                       if (*s) parity = *s++;
-                       if (*s == ',') s++;
-                       if (*s) bits = *s++;
-                       if (*s == ',') s++;
-                       if (*s == 'h') flow = 'r';
+                       if (*s == ',')
+                               s++;
+                       if (*s)
+                               parity = *s++;
+                       if (*s == ',')
+                               s++;
+                       if (*s)
+                               bits = *s++;
+                       if (*s == ',')
+                               s++;
+                       if (*s == 'h')
+                               flow = 'r';
                }
                if (baud == 0)
                        baud = 38400;
@@ -158,8 +70,9 @@ static void __init console_config(void)
                        bits = '8';
                if (flow == '\0')
                        flow = 'r';
-               sprintf(console_string, " console=ttyS0,%d%c%c%c", baud, parity, bits, flow);
-               strcat(prom_getcmdline(), console_string);
+               sprintf(console_string, " console=ttyS0,%d%c%c%c", baud,
+                       parity, bits, flow);
+               strcat(fw_getcmdline(), console_string);
                pr_info("Config serial console:%s\n", console_string);
        }
 }
@@ -193,10 +106,6 @@ extern struct plat_smp_ops msmtc_smp_ops;
 
 void __init prom_init(void)
 {
-       prom_argc = fw_arg0;
-       _prom_argv = (int *) fw_arg1;
-       _prom_envp = (int *) fw_arg2;
-
        mips_display_message("LINUX");
 
        /*
@@ -306,7 +215,7 @@ void __init prom_init(void)
        case MIPS_REVISION_SCON_SOCIT:
        case MIPS_REVISION_SCON_ROCIT:
                _pcictrl_msc = (unsigned long)ioremap(MIPS_MSC01_PCI_REG_BASE, 0x2000);
-       mips_pci_controller:
+mips_pci_controller:
                mb();
                MSC_READ(MSC01_PCI_CFG, data);
                MSC_WRITE(MSC01_PCI_CFG, data & ~MSC01_PCI_CFG_EN_BIT);
@@ -348,13 +257,13 @@ void __init prom_init(void)
        default:
                /* Unknown system controller */
                mips_display_message("SC Error");
-               while (1);   /* We die here... */
+               while (1);      /* We die here... */
        }
        board_nmi_handler_setup = mips_nmi_setup;
        board_ejtag_handler_setup = mips_ejtag_setup;
 
-       prom_init_cmdline();
-       prom_meminit();
+       fw_init_cmdline();
+       fw_meminit();
 #ifdef CONFIG_SERIAL_8250_CONSOLE
        console_config();
 #endif
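
The reworked console_config() above turns YAMON's modetty0 variable into a console= argument. A self-contained sketch of that parsing, with the "baud,parity,bits,flow" field order taken from the code and everything else assumed:

/* Parse a modetty0-style string such as "38400,n,8,hw" and print the
 * console= argument that would be appended. Defaults mirror the kernel
 * code above; this is a user-space illustration only. */
#include <stdio.h>

static void parse_modetty(const char *s)
{
	int baud = 0;
	char parity = '\0', bits = '\0', flow = '\0';

	while (*s >= '0' && *s <= '9')
		baud = baud * 10 + *s++ - '0';
	if (*s == ',')
		s++;
	if (*s)
		parity = *s++;
	if (*s == ',')
		s++;
	if (*s)
		bits = *s++;
	if (*s == ',')
		s++;
	if (*s == 'h')
		flow = 'r';		/* hardware flow control -> RTS/CTS */

	if (baud == 0)
		baud = 38400;
	if (parity != 'n' && parity != 'o' && parity != 'e')
		parity = 'n';
	if (bits != '7' && bits != '8')
		bits = '8';
	if (flow == '\0')
		flow = 'r';

	printf(" console=ttyS0,%d%c%c%c\n", baud, parity, bits, flow);
}

int main(void)
{
	parse_modetty("38400,n,8,hw");
	return 0;
}
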
index e364af70e6cf5cc255a9be51ca569d7cb911dbf0..0a1339ac3ec8f8e6c3d9be6c4dc3b272d7044f98 100644 (file)
@@ -47,7 +47,6 @@
 #include <asm/setup.h>
 
 int gcmp_present = -1;
-int gic_present;
 static unsigned long _msc01_biu_base;
 static unsigned long _gcmp_base;
 static unsigned int ipi_map[NR_CPUS];
@@ -134,6 +133,9 @@ static void malta_ipi_irqdispatch(void)
 {
        int irq;
 
+       if (gic_compare_int())
+               do_IRQ(MIPS_GIC_IRQ_BASE);
+
        irq = gic_get_int();
        if (irq < 0)
                return;  /* interrupt has already been cleared */
index f3d43aa023a9c9215dc06334204c1f82e122ef73..1f73d63e92a765d3ab1d829244a19e57dab8bd8e 100644 (file)
@@ -1,73 +1,45 @@
 /*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
  *
  * PROM library functions for acquiring/using memory descriptors given to
  * us from the YAMON.
+ *
+ * Copyright (C) 1999,2000,2012  MIPS Technologies, Inc.
+ * All rights reserved.
+ * Authors: Carsten Langgaard <carstenl@mips.com>
+ *          Steven J. Hill <sjhill@mips.com>
  */
 #include <linux/init.h>
-#include <linux/mm.h>
 #include <linux/bootmem.h>
-#include <linux/pfn.h>
 #include <linux/string.h>
 
 #include <asm/bootinfo.h>
-#include <asm/page.h>
 #include <asm/sections.h>
+#include <asm/fw/fw.h>
 
-#include <asm/mips-boards/prom.h>
-
-/*#define DEBUG*/
-
-enum yamon_memtypes {
-       yamon_dontuse,
-       yamon_prom,
-       yamon_free,
-};
-static struct prom_pmemblock mdesc[PROM_MAX_PMEMBLOCKS];
-
-#ifdef DEBUG
-static char *mtypes[3] = {
-       "Dont use memory",
-       "YAMON PROM memory",
-       "Free memory",
-};
-#endif
+static fw_memblock_t mdesc[FW_MAX_MEMBLOCKS];
 
 /* determined physical memory size, not overridden by command line args         */
 unsigned long physical_memsize = 0L;
 
-static struct prom_pmemblock * __init prom_getmdesc(void)
+fw_memblock_t * __init fw_getmdesc(void)
 {
-       char *memsize_str;
+       char *memsize_str, *ptr;
        unsigned int memsize;
-       char *ptr;
        static char cmdline[COMMAND_LINE_SIZE] __initdata;
+       long val;
+       int tmp;
 
        /* otherwise look in the environment */
-       memsize_str = prom_getenv("memsize");
+       memsize_str = fw_getenv("memsize");
        if (!memsize_str) {
-               printk(KERN_WARNING
-                      "memsize not set in boot prom, set to default (32Mb)\n");
+               pr_warn("memsize not set in YAMON, set to default (32Mb)\n");
                physical_memsize = 0x02000000;
        } else {
-#ifdef DEBUG
-               pr_debug("prom_memsize = %s\n", memsize_str);
-#endif
-               physical_memsize = simple_strtol(memsize_str, NULL, 0);
+               tmp = kstrtol(memsize_str, 0, &val);
+               physical_memsize = (unsigned long)val;
        }
 
 #ifdef CONFIG_CPU_BIG_ENDIAN
@@ -90,11 +62,11 @@ static struct prom_pmemblock * __init prom_getmdesc(void)
 
        memset(mdesc, 0, sizeof(mdesc));
 
-       mdesc[0].type = yamon_dontuse;
+       mdesc[0].type = fw_dontuse;
        mdesc[0].base = 0x00000000;
        mdesc[0].size = 0x00001000;
 
-       mdesc[1].type = yamon_prom;
+       mdesc[1].type = fw_code;
        mdesc[1].base = 0x00001000;
        mdesc[1].size = 0x000ef000;
 
@@ -105,55 +77,45 @@ static struct prom_pmemblock * __init prom_getmdesc(void)
         * This means that this area can't be used as DMA memory for PCI
         * devices.
         */
-       mdesc[2].type = yamon_dontuse;
+       mdesc[2].type = fw_dontuse;
        mdesc[2].base = 0x000f0000;
        mdesc[2].size = 0x00010000;
 
-       mdesc[3].type = yamon_dontuse;
+       mdesc[3].type = fw_dontuse;
        mdesc[3].base = 0x00100000;
-       mdesc[3].size = CPHYSADDR(PFN_ALIGN((unsigned long)&_end)) - mdesc[3].base;
+       mdesc[3].size = CPHYSADDR(PFN_ALIGN((unsigned long)&_end)) -
+               mdesc[3].base;
 
-       mdesc[4].type = yamon_free;
+       mdesc[4].type = fw_free;
        mdesc[4].base = CPHYSADDR(PFN_ALIGN(&_end));
        mdesc[4].size = memsize - mdesc[4].base;
 
        return &mdesc[0];
 }
 
-static int __init prom_memtype_classify(unsigned int type)
+static int __init fw_memtype_classify(unsigned int type)
 {
        switch (type) {
-       case yamon_free:
+       case fw_free:
                return BOOT_MEM_RAM;
-       case yamon_prom:
+       case fw_code:
                return BOOT_MEM_ROM_DATA;
        default:
                return BOOT_MEM_RESERVED;
        }
 }
 
-void __init prom_meminit(void)
+void __init fw_meminit(void)
 {
-       struct prom_pmemblock *p;
+       fw_memblock_t *p;
 
-#ifdef DEBUG
-       pr_debug("YAMON MEMORY DESCRIPTOR dump:\n");
-       p = prom_getmdesc();
-       while (p->size) {
-               int i = 0;
-               pr_debug("[%d,%p]: base<%08lx> size<%08lx> type<%s>\n",
-                        i, p, p->base, p->size, mtypes[p->type]);
-               p++;
-               i++;
-       }
-#endif
-       p = prom_getmdesc();
+       p = fw_getmdesc();
 
        while (p->size) {
                long type;
                unsigned long base, size;
 
-               type = prom_memtype_classify(p->type);
+               type = fw_memtype_classify(p->type);
                base = p->base;
                size = p->size;
 
@@ -172,7 +134,7 @@ void __init prom_free_prom_memory(void)
                        continue;
 
                addr = boot_mem_map.map[i].addr;
-               free_init_pages("prom memory",
+               free_init_pages("YAMON memory",
                                addr, addr + boot_mem_map.map[i].size);
        }
 }
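
The rewritten memory setup above keeps the same flow: read memsize from the firmware environment (falling back to 32MB), build a small table of typed blocks, and classify each block for the boot memory map. A user-space sketch of just the memsize handling, with strtoul standing in for the kernel's kstrtol():

/* Parse the firmware "memsize" string if present, otherwise fall back
 * to the 32MB default used above. Illustration only. */
#include <stdio.h>
#include <stdlib.h>

static unsigned long parse_memsize(const char *memsize_str)
{
	if (!memsize_str) {
		fprintf(stderr, "memsize not set, defaulting to 32Mb\n");
		return 0x02000000UL;
	}
	return strtoul(memsize_str, NULL, 0);
}

int main(void)
{
	printf("0x%lx\n", parse_memsize("0x8000000"));
	printf("0x%lx\n", parse_memsize(NULL));
	return 0;
}
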
index 200f64df2c9b492bf6ec948ff03bbfee864f59fa..c72a069367819d1ca8c91532862dbfbd890de1b1 100644 (file)
 #include <linux/screen_info.h>
 #include <linux/time.h>
 
-#include <asm/bootinfo.h>
+#include <asm/fw/fw.h>
 #include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/prom.h>
 #include <asm/mips-boards/malta.h>
 #include <asm/mips-boards/maltaint.h>
 #include <asm/dma.h>
 #include <asm/traps.h>
+#include <asm/gcmpregs.h>
 #ifdef CONFIG_VT
 #include <linux/console.h>
 #endif
@@ -105,6 +105,66 @@ static void __init fd_activate(void)
 }
 #endif
 
+static int __init plat_enable_iocoherency(void)
+{
+       int supported = 0;
+       if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO) {
+               if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) {
+                       BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN;
+                       pr_info("Enabled Bonito CPU coherency\n");
+                       supported = 1;
+               }
+               if (strstr(fw_getcmdline(), "iobcuncached")) {
+                       BONITO_PCICACHECTRL &= ~BONITO_PCICACHECTRL_IOBCCOH_EN;
+                       BONITO_PCIMEMBASECFG = BONITO_PCIMEMBASECFG &
+                               ~(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
+                                 BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
+                       pr_info("Disabled Bonito IOBC coherency\n");
+               } else {
+                       BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_IOBCCOH_EN;
+                       BONITO_PCIMEMBASECFG |=
+                               (BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
+                                BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
+                       pr_info("Enabled Bonito IOBC coherency\n");
+               }
+       } else if (gcmp_niocu() != 0) {
+               /* Nothing special needs to be done to enable coherency */
+               pr_info("CMP IOCU detected\n");
+               if ((*(unsigned int *)0xbf403000 & 0x81) != 0x81) {
+                       pr_crit("IOCU OPERATION DISABLED BY SWITCH - DEFAULTING TO SW IO COHERENCY\n");
+                       return 0;
+               }
+               supported = 1;
+       }
+       hw_coherentio = supported;
+       return supported;
+}
+
+static void __init plat_setup_iocoherency(void)
+{
+#ifdef CONFIG_DMA_NONCOHERENT
+       /*
+        * Kernel has been configured with software coherency
+        * but we might choose to turn it off and use hardware
+        * coherency instead.
+        */
+       if (plat_enable_iocoherency()) {
+               if (coherentio == 0)
+                       pr_info("Hardware DMA cache coherency disabled\n");
+               else
+                       pr_info("Hardware DMA cache coherency enabled\n");
+       } else {
+               if (coherentio == 1)
+                       pr_info("Hardware DMA cache coherency unsupported, but enabled from command line!\n");
+               else
+                       pr_info("Software DMA cache coherency enabled\n");
+       }
+#else
+       if (!plat_enable_iocoherency())
+               panic("Hardware DMA cache coherency not supported!");
+#endif
+}
+
 #ifdef CONFIG_BLK_DEV_IDE
 static void __init pci_clock_check(void)
 {
@@ -115,16 +175,15 @@ static void __init pci_clock_check(void)
                33, 20, 25, 30, 12, 16, 37, 10
        };
        int pciclock = pciclocks[jmpr];
-       char *argptr = prom_getcmdline();
+       char *argptr = fw_getcmdline();
 
        if (pciclock != 33 && !strstr(argptr, "idebus=")) {
-               printk(KERN_WARNING "WARNING: PCI clock is %dMHz, "
-                               "setting idebus\n", pciclock);
+               pr_warn("WARNING: PCI clock is %dMHz, setting idebus\n",
+                       pciclock);
                argptr += strlen(argptr);
                sprintf(argptr, " idebus=%d", pciclock);
                if (pciclock < 20 || pciclock > 66)
-                       printk(KERN_WARNING "WARNING: IDE timing "
-                                       "calculations will be incorrect\n");
+                       pr_warn("WARNING: IDE timing calculations will be incorrect\n");
        }
 }
 #endif
@@ -153,31 +212,31 @@ static void __init bonito_quirks_setup(void)
 {
        char *argptr;
 
-       argptr = prom_getcmdline();
+       argptr = fw_getcmdline();
        if (strstr(argptr, "debug")) {
                BONITO_BONGENCFG |= BONITO_BONGENCFG_DEBUGMODE;
-               printk(KERN_INFO "Enabled Bonito debug mode\n");
+               pr_info("Enabled Bonito debug mode\n");
        } else
                BONITO_BONGENCFG &= ~BONITO_BONGENCFG_DEBUGMODE;
 
 #ifdef CONFIG_DMA_COHERENT
        if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) {
                BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN;
-               printk(KERN_INFO "Enabled Bonito CPU coherency\n");
+               pr_info("Enabled Bonito CPU coherency\n");
 
-               argptr = prom_getcmdline();
+               argptr = fw_getcmdline();
                if (strstr(argptr, "iobcuncached")) {
                        BONITO_PCICACHECTRL &= ~BONITO_PCICACHECTRL_IOBCCOH_EN;
                        BONITO_PCIMEMBASECFG = BONITO_PCIMEMBASECFG &
                                ~(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
                                        BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
-                       printk(KERN_INFO "Disabled Bonito IOBC coherency\n");
+                       pr_info("Disabled Bonito IOBC coherency\n");
                } else {
                        BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_IOBCCOH_EN;
                        BONITO_PCIMEMBASECFG |=
                                (BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
                                        BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
-                       printk(KERN_INFO "Enabled Bonito IOBC coherency\n");
+                       pr_info("Enabled Bonito IOBC coherency\n");
                }
        } else
                panic("Hardware DMA cache coherency not supported");
@@ -207,6 +266,8 @@ void __init plat_mem_setup(void)
        if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO)
                bonito_quirks_setup();
 
+       plat_setup_iocoherency();
+
 #ifdef CONFIG_BLK_DEV_IDE
        pci_clock_check();
 #endif
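
The new plat_setup_iocoherency() above encodes a small policy: if the board can provide hardware I/O coherency, use it and report whether the command line forced it off; otherwise stay with software cache maintenance, and panic only when the kernel was built to require hardware coherency. A compact sketch of that decision, with stub names that are not the kernel's:

/* Stubbed illustration of the coherency policy; the probe and the
 * command-line flag are stand-ins for plat_enable_iocoherency() and
 * the coherentio variable. */
#include <stdio.h>

static int hw_coherency_available(void)
{
	return 1;			/* stub for the hardware probe */
}

static void choose_coherency(int coherentio)
{
	if (hw_coherency_available())
		printf("Hardware DMA cache coherency %s\n",
		       coherentio ? "enabled" : "disabled");
	else
		printf("Software DMA cache coherency enabled\n");
}

int main(void)
{
	choose_coherency(1);
	return 0;
}
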
index a144b89cf9ba299e5f9fb2bcea5fd179f5ceb413..0ad305f75802bc64250ad01be6e76b1826ea95a2 100644 (file)
 #include <asm/gic.h>
 
 #include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/prom.h>
-
 #include <asm/mips-boards/maltaint.h>
 
 unsigned long cpu_khz;
-int gic_frequency;
 
 static int mips_cpu_timer_irq;
 static int mips_cpu_perf_irq;
@@ -74,7 +71,24 @@ static void __init estimate_frequencies(void)
 {
        unsigned long flags;
        unsigned int count, start;
+#ifdef CONFIG_IRQ_GIC
        unsigned int giccount = 0, gicstart = 0;
+#endif
+
+#if defined (CONFIG_KVM_GUEST) && defined (CONFIG_KVM_HOST_FREQ)
+       unsigned int prid = read_c0_prid() & 0xffff00;
+
+       /*
+        * XXXKYMA: hardwire the CPU frequency to Host Freq/4
+        */
+       count = (CONFIG_KVM_HOST_FREQ * 1000000) >> 3;
+       if ((prid != (PRID_COMP_MIPS | PRID_IMP_20KC)) &&
+           (prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
+               count *= 2;
+
+       mips_hpt_frequency = count;
+       return;
+#endif
 
        local_irq_save(flags);
 
@@ -84,26 +98,32 @@ static void __init estimate_frequencies(void)
 
        /* Initialize counters. */
        start = read_c0_count();
+#ifdef CONFIG_IRQ_GIC
        if (gic_present)
                GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), gicstart);
+#endif
 
        /* Read counter exactly on falling edge of update flag. */
        while (CMOS_READ(RTC_REG_A) & RTC_UIP);
        while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));
 
        count = read_c0_count();
+#ifdef CONFIG_IRQ_GIC
        if (gic_present)
                GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), giccount);
+#endif
 
        local_irq_restore(flags);
 
        count -= start;
-       if (gic_present)
-               giccount -= gicstart;
-
        mips_hpt_frequency = count;
-       if (gic_present)
+
+#ifdef CONFIG_IRQ_GIC
+       if (gic_present) {
+               giccount -= gicstart;
                gic_frequency = giccount;
+       }
+#endif
 }
 
 void read_persistent_clock(struct timespec *ts)
@@ -159,24 +179,27 @@ void __init plat_time_init(void)
            (prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
                freq *= 2;
        freq = freqround(freq, 5000);
-       pr_debug("CPU frequency %d.%02d MHz\n", freq/1000000,
+       printk("CPU frequency %d.%02d MHz\n", freq/1000000,
               (freq%1000000)*100/1000000);
        cpu_khz = freq / 1000;
 
-       if (gic_present) {
-               freq = freqround(gic_frequency, 5000);
-               pr_debug("GIC frequency %d.%02d MHz\n", freq/1000000,
-                      (freq%1000000)*100/1000000);
-               gic_clocksource_init(gic_frequency);
-       } else
-               init_r4k_clocksource();
+       mips_scroll_message();
 
 #ifdef CONFIG_I8253
        /* Only Malta has a PIT. */
        setup_pit_timer();
 #endif
 
-       mips_scroll_message();
+#ifdef CONFIG_IRQ_GIC
+       if (gic_present) {
+               freq = freqround(gic_frequency, 5000);
+               printk("GIC frequency %d.%02d MHz\n", freq/1000000,
+                      (freq%1000000)*100/1000000);
+#ifdef CONFIG_CSRC_GIC
+               gic_clocksource_init(gic_frequency);
+#endif
+       }
+#endif
 
        plat_perf_setup();
 }
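
estimate_frequencies() above measures the CPU (and optionally GIC) clock by sampling free-running counters at two successive RTC update edges, which are exactly one second apart, and taking the difference. A standalone sketch of that idea, with the counter and RTC accessors replaced by stubs:

/* Frequency estimation by counting ticks across one RTC second.
 * read_counter() and wait_for_rtc_update_edge() are stand-ins for
 * read_c0_count() and the RTC_UIP polling loops. */
#include <stdio.h>

static unsigned int counter_val;

static unsigned int read_counter(void)
{
	return counter_val;
}

static void wait_for_rtc_update_edge(void)
{
	counter_val += 100000000;	/* pretend 100 MHz elapsed */
}

int main(void)
{
	unsigned int start, count;

	wait_for_rtc_update_edge();	/* align to an update boundary */
	start = read_counter();
	wait_for_rtc_update_edge();	/* exactly one second later */
	count = read_counter() - start;

	printf("estimated frequency: %u Hz\n", count);
	return 0;
}
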
index 10ec701ce6c72e8cd056fd8ac6fb55b19608d8c8..be114209217cc5a13fe8531365ba5454b12c530f 100644 (file)
@@ -8,10 +8,10 @@
 # Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
 # Steven J. Hill <sjhill@mips.com>
 #
-obj-y                          := sead3-lcd.o sead3-cmdline.o \
-                                  sead3-display.o sead3-init.o sead3-int.o \
-                                  sead3-mtd.o sead3-net.o sead3-platform.o \
-                                  sead3-reset.o sead3-setup.o sead3-time.o
+obj-y                          := sead3-lcd.o sead3-display.o sead3-init.o \
+                                  sead3-int.o sead3-mtd.o sead3-net.o \
+                                  sead3-platform.o sead3-reset.o \
+                                  sead3-setup.o sead3-time.o
 
 obj-y                          += sead3-i2c-dev.o sead3-i2c.o \
                                   sead3-pic32-i2c-drv.o sead3-pic32-bus.o \
index 322148c353ed65065bd146f05de3ce27198382ca..0a168c948b01a9e593c5c4cc7cce7679a86c618b 100644 (file)
@@ -34,33 +34,15 @@ static void sead3_fled_set(struct led_classdev *led_cdev,
 static struct led_classdev sead3_pled = {
        .name           = "sead3::pled",
        .brightness_set = sead3_pled_set,
+       .flags          = LED_CORE_SUSPENDRESUME,
 };
 
 static struct led_classdev sead3_fled = {
        .name           = "sead3::fled",
        .brightness_set = sead3_fled_set,
+       .flags          = LED_CORE_SUSPENDRESUME,
 };
 
-#ifdef CONFIG_PM
-static int sead3_led_suspend(struct platform_device *dev,
-               pm_message_t state)
-{
-       led_classdev_suspend(&sead3_pled);
-       led_classdev_suspend(&sead3_fled);
-       return 0;
-}
-
-static int sead3_led_resume(struct platform_device *dev)
-{
-       led_classdev_resume(&sead3_pled);
-       led_classdev_resume(&sead3_fled);
-       return 0;
-}
-#else
-#define sead3_led_suspend NULL
-#define sead3_led_resume NULL
-#endif
-
 static int sead3_led_probe(struct platform_device *pdev)
 {
        int ret;
@@ -86,8 +68,6 @@ static int sead3_led_remove(struct platform_device *pdev)
 static struct platform_driver sead3_led_driver = {
        .probe          = sead3_led_probe,
        .remove         = sead3_led_remove,
-       .suspend        = sead3_led_suspend,
-       .resume         = sead3_led_resume,
        .driver         = {
                .name           = DRVNAME,
                .owner          = THIS_MODULE,
diff --git a/arch/mips/mti-sead3/sead3-cmdline.c b/arch/mips/mti-sead3/sead3-cmdline.c
deleted file mode 100644 (file)
index a2e6cec..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
- */
-#include <linux/init.h>
-#include <linux/string.h>
-
-#include <asm/bootinfo.h>
-
-extern int prom_argc;
-extern int *_prom_argv;
-
-/*
- * YAMON (32-bit PROM) pass arguments and environment as 32-bit pointer.
- * This macro take care of sign extension.
- */
-#define prom_argv(index) ((char *)(long)_prom_argv[(index)])
-
-char * __init prom_getcmdline(void)
-{
-       return &(arcs_cmdline[0]);
-}
-
-void  __init prom_init_cmdline(void)
-{
-       char *cp;
-       int actr;
-
-       actr = 1; /* Always ignore argv[0] */
-
-       cp = &(arcs_cmdline[0]);
-       while (actr < prom_argc) {
-               strcpy(cp, prom_argv(actr));
-               cp += strlen(prom_argv(actr));
-               *cp++ = ' ';
-               actr++;
-       }
-       if (cp != &(arcs_cmdline[0])) {
-               /* get rid of trailing space */
-               --cp;
-               *cp = '\0';
-       }
-}
index 2ddef19a9adc891b898188f8fae38e692309073a..031f47d6977006451f514f3aa1b72d0219f1821c 100644 (file)
@@ -26,7 +26,7 @@ static inline void serial_out(int offset, int value, unsigned int base_addr)
        __raw_writel(value, PORT(base_addr, offset));
 }
 
-void __init prom_init_early_console(char port)
+void __init fw_init_early_console(char port)
 {
        console_port = port;
 }
index e389326cfa423add03614ec7baa10b1ed529c7e2..94875991907bd4c3bfda8ac32b825a3513e7a4e8 100644 (file)
@@ -8,7 +8,6 @@
 #include <linux/timer.h>
 #include <linux/io.h>
 #include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/prom.h>
 
 static unsigned int display_count;
 static unsigned int max_display_count;
index f95abaa1aa5db6a06b4d8b80696c99120ed48711..bfbd17b120a21d637888a65db614df206de41043 100644 (file)
 #include <asm/cacheflush.h>
 #include <asm/traps.h>
 #include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/prom.h>
-
-extern void prom_init_early_console(char port);
+#include <asm/fw/fw.h>
 
 extern char except_vec_nmi;
 extern char except_vec_ejtag_debug;
 
-int prom_argc;
-int *_prom_argv, *_prom_envp;
-
-#define prom_envp(index) ((char *)(long)_prom_envp[(index)])
-
-char *prom_getenv(char *envname)
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+static void __init console_config(void)
 {
-       /*
-        * Return a pointer to the given environment variable.
-        * In 64-bit mode: we're using 64-bit pointers, but all pointers
-        * in the PROM structures are only 32-bit, so we need some
-        * workarounds, if we are running in 64-bit mode.
-        */
-       int i, index = 0;
-
-       i = strlen(envname);
-
-       while (prom_envp(index)) {
-               if (strncmp(envname, prom_envp(index), i) == 0)
-                       return prom_envp(index+1);
-               index += 2;
+       char console_string[40];
+       int baud = 0;
+       char parity = '\0', bits = '\0', flow = '\0';
+       char *s;
+
+       if ((strstr(fw_getcmdline(), "console=")) == NULL) {
+               s = fw_getenv("modetty0");
+               if (s) {
+                       while (*s >= '0' && *s <= '9')
+                               baud = baud*10 + *s++ - '0';
+                       if (*s == ',')
+                               s++;
+                       if (*s)
+                               parity = *s++;
+                       if (*s == ',')
+                               s++;
+                       if (*s)
+                               bits = *s++;
+                       if (*s == ',')
+                               s++;
+                       if (*s == 'h')
+                               flow = 'r';
+               }
+               if (baud == 0)
+                       baud = 38400;
+               if (parity != 'n' && parity != 'o' && parity != 'e')
+                       parity = 'n';
+               if (bits != '7' && bits != '8')
+                       bits = '8';
+               if (flow == '\0')
+                       flow = 'r';
+               sprintf(console_string, " console=ttyS0,%d%c%c%c", baud,
+                       parity, bits, flow);
+               strcat(fw_getcmdline(), console_string);
        }
-
-       return NULL;
 }
+#endif
 
 static void __init mips_nmi_setup(void)
 {
@@ -52,7 +65,41 @@ static void __init mips_nmi_setup(void)
        base = cpu_has_veic ?
                (void *)(CAC_BASE + 0xa80) :
                (void *)(CAC_BASE + 0x380);
+#ifdef CONFIG_CPU_MICROMIPS
+       /*
+        * Decrement the exception vector address by one for microMIPS.
+        */
+       memcpy(base, (&except_vec_nmi - 1), 0x80);
+
+       /*
+        * This is a hack. We do not know if the boot loader was built with
+        * microMIPS instructions or not. If it was not, the NMI exception
+        * code at 0x80000a80 will be taken in MIPS32 mode. The hand coded
+        * assembly below forces us into microMIPS mode if we are a pure
+        * microMIPS kernel. The assembly instructions are:
+        *
+        *  3C1A8000   lui       k0,0x8000
+        *  375A0381   ori       k0,k0,0x381
+        *  03400008   jr        k0
+        *  00000000   nop
+        *
+        * The mode switch occurs by jumping to the unaligned exception
+        * vector address at 0x80000381 which would have been 0x80000380
+        * in MIPS32 mode. The jump to the unaligned address transitions
+        * us into microMIPS mode.
+        */
+       if (!cpu_has_veic) {
+               void *base2 = (void *)(CAC_BASE + 0xa80);
+               *((unsigned int *)base2) = 0x3c1a8000;
+               *((unsigned int *)base2 + 1) = 0x375a0381;
+               *((unsigned int *)base2 + 2) = 0x03400008;
+               *((unsigned int *)base2 + 3) = 0x00000000;
+               flush_icache_range((unsigned long)base2,
+                       (unsigned long)base2 + 0x10);
+       }
+#else
        memcpy(base, &except_vec_nmi, 0x80);
+#endif
        flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
 }
 
@@ -63,29 +110,40 @@ static void __init mips_ejtag_setup(void)
        base = cpu_has_veic ?
                (void *)(CAC_BASE + 0xa00) :
                (void *)(CAC_BASE + 0x300);
+#ifdef CONFIG_CPU_MICROMIPS
+       /* Deja vu... */
+       memcpy(base, (&except_vec_ejtag_debug - 1), 0x80);
+       if (!cpu_has_veic) {
+               void *base2 = (void *)(CAC_BASE + 0xa00);
+               *((unsigned int *)base2) = 0x3c1a8000;
+               *((unsigned int *)base2 + 1) = 0x375a0301;
+               *((unsigned int *)base2 + 2) = 0x03400008;
+               *((unsigned int *)base2 + 3) = 0x00000000;
+               flush_icache_range((unsigned long)base2,
+                       (unsigned long)base2 + 0x10);
+       }
+#else
        memcpy(base, &except_vec_ejtag_debug, 0x80);
+#endif
        flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
 }
 
 void __init prom_init(void)
 {
-       prom_argc = fw_arg0;
-       _prom_argv = (int *) fw_arg1;
-       _prom_envp = (int *) fw_arg2;
-
        board_nmi_handler_setup = mips_nmi_setup;
        board_ejtag_handler_setup = mips_ejtag_setup;
 
-       prom_init_cmdline();
+       fw_init_cmdline();
 #ifdef CONFIG_EARLY_PRINTK
-       if ((strstr(prom_getcmdline(), "console=ttyS0")) != NULL)
-               prom_init_early_console(0);
-       else if ((strstr(prom_getcmdline(), "console=ttyS1")) != NULL)
-               prom_init_early_console(1);
+       if ((strstr(fw_getcmdline(), "console=ttyS0")) != NULL)
+               fw_init_early_console(0);
+       else if ((strstr(fw_getcmdline(), "console=ttyS1")) != NULL)
+               fw_init_early_console(1);
 #endif
 #ifdef CONFIG_SERIAL_8250_CONSOLE
-       if ((strstr(prom_getcmdline(), "console=")) == NULL)
-               strcat(prom_getcmdline(), " console=ttyS0,38400n8r");
+       if ((strstr(fw_getcmdline(), "console=")) == NULL)
+               strcat(fw_getcmdline(), " console=ttyS0,38400n8r");
+       console_config();
 #endif
 }
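
The microMIPS NMI/EJTAG trampolines added above rely on the fact that MIPS encodes the ISA mode in bit 0 of a jump target: the MIPS32 lui/ori/jr sequence loads 0x80000381, the vector address plus one, and jumping to that odd address switches the CPU into microMIPS mode. A small decoder that reproduces the arithmetic from the instruction words quoted in the comment:

/* Recover the jump target from the lui/ori pair used in the trampoline
 * and show that bit 0 is set, i.e. the jump lands in microMIPS mode. */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint32_t trampoline[4] = {
		0x3c1a8000,	/* lui  k0, 0x8000    */
		0x375a0381,	/* ori  k0, k0, 0x381 */
		0x03400008,	/* jr   k0            */
		0x00000000,	/* nop (delay slot)   */
	};
	uint32_t target = ((trampoline[0] & 0xffff) << 16)	/* lui immediate */
			| (trampoline[1] & 0xffff);		/* ori immediate */

	printf("jump target: 0x%08" PRIx32 " (%s)\n", target,
	       (target & 1) ? "microMIPS, bit 0 set" : "MIPS32");
	return 0;
}
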
 
index e26e08274fc5b32a898ccdc40c8c346ec2a9d7d8..6a560ac03def0db5f2a794529998cf5e39ad1887 100644 (file)
@@ -20,7 +20,6 @@
 #define SEAD_CONFIG_BASE               0x1b100110
 #define SEAD_CONFIG_SIZE               4
 
-int gic_present;
 static unsigned long sead3_config_reg;
 
 /*
index f012fd164ceedc9e2aed9a94ebb30cc2bdc602c2..b5059dc899f44d2616a4c72c873fe82819474c09 100644 (file)
 #include <linux/bootmem.h>
 
 #include <asm/mips-boards/generic.h>
-#include <asm/prom.h>
-
-int coherentio;                /* 0 => no DMA cache coherency (may be set by user) */
-int hw_coherentio;     /* 0 => no HW DMA cache coherency (reflects real HW) */
 
 const char *get_system_type(void)
 {
index 239e4e32757fc566f8fb96b4366806531ba87aa7..96b42eb9b5e221cccdc4cf861fe48f5455cb4d0f 100644 (file)
@@ -11,7 +11,6 @@
 #include <asm/time.h>
 #include <asm/irq.h>
 #include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/prom.h>
 
 unsigned long cpu_khz;
 
index 3c05bf9e280ae5e66ea1e8ae3cfc96179a610d8b..e0873a31ebaace03336502d8bf57087fca3375b9 100644 (file)
@@ -2,13 +2,22 @@ if NLM_XLP_BOARD || NLM_XLR_BOARD
 
 if NLM_XLP_BOARD
 config DT_XLP_EVP
-       bool "Built-in device tree for XLP EVP/SVP boards"
+       bool "Built-in device tree for XLP EVP boards"
        default y
        help
-         Add an FDT blob for XLP EVP and SVP boards into the kernel.
+         Add an FDT blob for XLP EVP boards into the kernel.
          This DTB will be used if the firmware does not pass in a DTB
-          pointer to the kernel.  The corresponding DTS file is at
-          arch/mips/netlogic/dts/xlp_evp.dts
+         pointer to the kernel.  The corresponding DTS file is at
+         arch/mips/netlogic/dts/xlp_evp.dts
+
+config DT_XLP_SVP
+       bool "Built-in device tree for XLP SVP boards"
+       default y
+       help
+         Add an FDT blob for XLP SVP boards into the kernel.
+         This DTB will be used if the firmware does not pass in a DTB
+         pointer to the kernel.  The corresponding DTS file is at
+         arch/mips/netlogic/dts/xlp_svp.dts
 
 config NLM_MULTINODE
        bool "Support for multi-chip boards"
index 2bb95dcfe20addf631ec84d15267563c30fe26e2..ffba52489bef7b52a1dcb996cb911c277359a45d 100644 (file)
@@ -148,8 +148,7 @@ void nlm_cpus_done(void)
 int nlm_cpu_ready[NR_CPUS];
 unsigned long nlm_next_gp;
 unsigned long nlm_next_sp;
-
-cpumask_t phys_cpu_present_map;
+static cpumask_t phys_cpu_present_mask;
 
 void nlm_boot_secondary(int logical_cpu, struct task_struct *idle)
 {
@@ -169,11 +168,12 @@ void __init nlm_smp_setup(void)
 {
        unsigned int boot_cpu;
        int num_cpus, i, ncore;
+       char buf[64];
 
        boot_cpu = hard_smp_processor_id();
-       cpumask_clear(&phys_cpu_present_map);
+       cpumask_clear(&phys_cpu_present_mask);
 
-       cpumask_set_cpu(boot_cpu, &phys_cpu_present_map);
+       cpumask_set_cpu(boot_cpu, &phys_cpu_present_mask);
        __cpu_number_map[boot_cpu] = 0;
        __cpu_logical_map[0] = boot_cpu;
        set_cpu_possible(0, true);
@@ -185,7 +185,7 @@ void __init nlm_smp_setup(void)
                 * it is only set for ASPs (see smpboot.S)
                 */
                if (nlm_cpu_ready[i]) {
-                       cpumask_set_cpu(i, &phys_cpu_present_map);
+                       cpumask_set_cpu(i, &phys_cpu_present_mask);
                        __cpu_number_map[i] = num_cpus;
                        __cpu_logical_map[num_cpus] = i;
                        set_cpu_possible(num_cpus, true);
@@ -193,16 +193,19 @@ void __init nlm_smp_setup(void)
                }
        }
 
+       cpumask_scnprintf(buf, ARRAY_SIZE(buf), &phys_cpu_present_mask);
+       pr_info("Physical CPU mask: %s\n", buf);
+       cpumask_scnprintf(buf, ARRAY_SIZE(buf), cpu_possible_mask);
+       pr_info("Possible CPU mask: %s\n", buf);
+
        /* check with the cores we have woken up */
        for (ncore = 0, i = 0; i < NLM_NR_NODES; i++)
                ncore += hweight32(nlm_get_node(i)->coremask);
 
-       pr_info("Phys CPU present map: %lx, possible map %lx\n",
-               (unsigned long)cpumask_bits(&phys_cpu_present_map)[0],
-               (unsigned long)cpumask_bits(cpu_possible_mask)[0]);
-
        pr_info("Detected (%dc%dt) %d Slave CPU(s)\n", ncore,
                nlm_threads_per_core, num_cpus);
+
+       /* switch NMI handler to boot CPUs */
        nlm_set_nmi_handler(nlm_boot_secondary_cpus);
 }
 
index d117d46413aa29d9b03806935c3d206f0e4f6b29..aecb6fa9a9c33968a187c872672d835c5ee39845 100644 (file)
@@ -1 +1,2 @@
 obj-$(CONFIG_DT_XLP_EVP) := xlp_evp.dtb.o
+obj-$(CONFIG_DT_XLP_SVP) += xlp_svp.dtb.o
index 7628b5464fc76161bf84f5192221b2e9716c1478..e14f4230806427bfb481d4e46d7a757cd2c82114 100644 (file)
@@ -20,7 +20,7 @@
                #address-cells = <2>;
                #size-cells = <1>;
                compatible = "simple-bus";
-               ranges = <0 0  0 0x18000000  0x04000000   // PCIe CFG
+               ranges = <0 0  0 0x18000000  0x04000000   // PCIe CFG
                          1 0  0 0x16000000  0x01000000>; // GBU chipselects
 
                serial0: serial@30000 {
diff --git a/arch/mips/netlogic/dts/xlp_svp.dts b/arch/mips/netlogic/dts/xlp_svp.dts
new file mode 100644 (file)
index 0000000..8af4bdb
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ * XLP3XX Device Tree Source for SVP boards
+ */
+
+/dts-v1/;
+/ {
+       model = "netlogic,XLP-SVP";
+       compatible = "netlogic,xlp";
+       #address-cells = <2>;
+       #size-cells = <2>;
+
+       memory {
+               device_type = "memory";
+               reg =  <0 0x00100000 0 0x0FF00000       // 255M at 1M
+                       0 0x20000000 0 0xa0000000       // 2560M at 512M
+                       0 0xe0000000 0 0x40000000>;
+       };
+
+       soc {
+               #address-cells = <2>;
+               #size-cells = <1>;
+               compatible = "simple-bus";
+               ranges = <0 0  0 0x18000000  0x04000000   // PCIe CFG
+                         1 0  0 0x16000000  0x01000000>; // GBU chipselects
+
+               serial0: serial@30000 {
+                       device_type = "serial";
+                       compatible = "ns16550";
+                       reg = <0 0x30100 0xa00>;
+                       reg-shift = <2>;
+                       reg-io-width = <4>;
+                       clock-frequency = <133333333>;
+                       interrupt-parent = <&pic>;
+                       interrupts = <17>;
+               };
+               serial1: serial@31000 {
+                       device_type = "serial";
+                       compatible = "ns16550";
+                       reg = <0 0x31100 0xa00>;
+                       reg-shift = <2>;
+                       reg-io-width = <4>;
+                       clock-frequency = <133333333>;
+                       interrupt-parent = <&pic>;
+                       interrupts = <18>;
+               };
+               i2c0: ocores@32000 {
+                       compatible = "opencores,i2c-ocores";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <0 0x32100 0xa00>;
+                       reg-shift = <2>;
+                       reg-io-width = <4>;
+                       clock-frequency = <32000000>;
+                       interrupt-parent = <&pic>;
+                       interrupts = <30>;
+               };
+               i2c1: ocores@33000 {
+                       compatible = "opencores,i2c-ocores";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <0 0x33100 0xa00>;
+                       reg-shift = <2>;
+                       reg-io-width = <4>;
+                       clock-frequency = <32000000>;
+                       interrupt-parent = <&pic>;
+                       interrupts = <31>;
+
+                       rtc@68 {
+                               compatible = "dallas,ds1374";
+                               reg = <0x68>;
+                       };
+
+                       dtt@4c {
+                               compatible = "national,lm90";
+                               reg = <0x4c>;
+                       };
+               };
+               pic: pic@4000 {
+                       interrupt-controller;
+                       #address-cells = <0>;
+                       #interrupt-cells = <1>;
+                       reg = <0 0x4000 0x200>;
+               };
+
+               nor_flash@1,0 {
+                       compatible = "cfi-flash";
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       bank-width = <2>;
+                       reg = <1 0 0x1000000>;
+
+                       partition@0 {
+                               label = "x-loader";
+                               reg = <0x0 0x100000>; /* 1M */
+                               read-only;
+                       };
+
+                       partition@100000 {
+                               label = "u-boot";
+                               reg = <0x100000 0x100000>; /* 1M */
+                       };
+
+                       partition@200000 {
+                               label = "kernel";
+                               reg = <0x200000 0x500000>; /* 5M */
+                       };
+
+                       partition@700000 {
+                               label = "rootfs";
+                               reg = <0x700000 0x800000>; /* 8M */
+                       };
+
+                       partition@f00000 {
+                               label = "env";
+                               reg = <0xf00000 0x100000>; /* 1M */
+                               read-only;
+                       };
+               };
+       };
+
+       chosen {
+               bootargs = "console=ttyS0,115200 rdinit=/sbin/init";
+       };
+};
index c68fd4026104b7ed4db04d21ae8d86753fa71266..87560e4db35f148e87028b6ce34fcaf1485177bf 100644 (file)
@@ -61,43 +61,61 @@ void nlm_node_init(int node)
 
 int nlm_irq_to_irt(int irq)
 {
-       if (!PIC_IRQ_IS_IRT(irq))
-               return -1;
+       uint64_t pcibase;
+       int devoff, irt;
 
        switch (irq) {
        case PIC_UART_0_IRQ:
-               return PIC_IRT_UART_0_INDEX;
+               devoff = XLP_IO_UART0_OFFSET(0);
+               break;
        case PIC_UART_1_IRQ:
-               return PIC_IRT_UART_1_INDEX;
-       case PIC_PCIE_LINK_0_IRQ:
-              return PIC_IRT_PCIE_LINK_0_INDEX;
-       case PIC_PCIE_LINK_1_IRQ:
-              return PIC_IRT_PCIE_LINK_1_INDEX;
-       case PIC_PCIE_LINK_2_IRQ:
-              return PIC_IRT_PCIE_LINK_2_INDEX;
-       case PIC_PCIE_LINK_3_IRQ:
-              return PIC_IRT_PCIE_LINK_3_INDEX;
+               devoff = XLP_IO_UART1_OFFSET(0);
+               break;
        case PIC_EHCI_0_IRQ:
-              return PIC_IRT_EHCI_0_INDEX;
+               devoff = XLP_IO_USB_EHCI0_OFFSET(0);
+               break;
        case PIC_EHCI_1_IRQ:
-              return PIC_IRT_EHCI_1_INDEX;
+               devoff = XLP_IO_USB_EHCI1_OFFSET(0);
+               break;
        case PIC_OHCI_0_IRQ:
-              return PIC_IRT_OHCI_0_INDEX;
+               devoff = XLP_IO_USB_OHCI0_OFFSET(0);
+               break;
        case PIC_OHCI_1_IRQ:
-              return PIC_IRT_OHCI_1_INDEX;
+               devoff = XLP_IO_USB_OHCI1_OFFSET(0);
+               break;
        case PIC_OHCI_2_IRQ:
-              return PIC_IRT_OHCI_2_INDEX;
+               devoff = XLP_IO_USB_OHCI2_OFFSET(0);
+               break;
        case PIC_OHCI_3_IRQ:
-              return PIC_IRT_OHCI_3_INDEX;
+               devoff = XLP_IO_USB_OHCI3_OFFSET(0);
+               break;
        case PIC_MMC_IRQ:
-              return PIC_IRT_MMC_INDEX;
+               devoff = XLP_IO_SD_OFFSET(0);
+               break;
        case PIC_I2C_0_IRQ:
-               return PIC_IRT_I2C_0_INDEX;
+               devoff = XLP_IO_I2C0_OFFSET(0);
+               break;
        case PIC_I2C_1_IRQ:
-               return PIC_IRT_I2C_1_INDEX;
+               devoff = XLP_IO_I2C1_OFFSET(0);
+               break;
        default:
-               return -1;
+               devoff = 0;
+               break;
        }
+
+       if (devoff != 0) {
+               pcibase = nlm_pcicfg_base(devoff);
+               irt = nlm_read_reg(pcibase, XLP_PCI_IRTINFO_REG) & 0xffff;
+               /* HW bug, I2C 1 irt entry is off by one */
+               if (irq == PIC_I2C_1_IRQ)
+                       irt = irt + 1;
+       } else if (irq >= PIC_PCIE_LINK_0_IRQ && irq <= PIC_PCIE_LINK_3_IRQ) {
+               /* HW bug, PCI IRT entries are bad on early silicon, fix */
+               irt = PIC_IRT_PCIE_LINK_INDEX(irq - PIC_PCIE_LINK_0_IRQ);
+       } else {
+               irt = -1;
+       }
+       return irt;
 }
 
 unsigned int nlm_get_core_frequency(int node, int core)
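
nlm_irq_to_irt() above no longer hard-codes interrupt routing table indices: for most devices it reads the index from a field in the device's PCI config space, then applies the two quirks noted in the comments (the I2C1 entry is off by one, and the PCIe link entries on early silicon are computed instead). A stripped-down sketch of that lookup shape; the register name and its offset are assumptions, not taken from the headers:

/* Hypothetical IRT lookup: read the index from a per-device config
 * register, then apply a quirk adjustment. Offsets are illustrative. */
#include <stdint.h>
#include <stdio.h>

#define IRTINFO_REG	0x3c		/* illustrative offset only */

static uint32_t read_cfg_reg(uint64_t devbase, int reg)
{
	(void)devbase;
	(void)reg;
	return 0x0021;			/* stand-in: low 16 bits = IRT index */
}

static int irq_to_irt(uint64_t devbase, int i2c1_quirk)
{
	int irt = read_cfg_reg(devbase, IRTINFO_REG) & 0xffff;

	if (i2c1_quirk)			/* hardware bug noted in the diff */
		irt += 1;
	return irt;
}

int main(void)
{
	printf("irt = %d\n", irq_to_irt(0, 0));
	return 0;
}
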
index 4894d62043ac3639ce7844069995466ced103ea0..af319143b59187a4918805f737e2dcdb480845e7 100644 (file)
@@ -56,7 +56,7 @@ uint64_t nlm_io_base;
 struct nlm_soc_info nlm_nodes[NLM_NR_NODES];
 cpumask_t nlm_cpumask = CPU_MASK_CPU0;
 unsigned int nlm_threads_per_core;
-extern u32 __dtb_start[];
+extern u32 __dtb_xlp_evp_begin[], __dtb_xlp_svp_begin[], __dtb_start[];
 
 static void nlm_linux_exit(void)
 {
@@ -82,8 +82,24 @@ void __init plat_mem_setup(void)
         * 64-bit, so convert pointer.
         */
        fdtp = (void *)(long)fw_arg0;
-       if (!fdtp)
-               fdtp = __dtb_start;
+       if (!fdtp) {
+               switch (current_cpu_data.processor_id & 0xff00) {
+#ifdef CONFIG_DT_XLP_SVP
+               case PRID_IMP_NETLOGIC_XLP3XX:
+                       fdtp = __dtb_xlp_svp_begin;
+                       break;
+#endif
+#ifdef CONFIG_DT_XLP_EVP
+               case PRID_IMP_NETLOGIC_XLP8XX:
+                       fdtp = __dtb_xlp_evp_begin;
+                       break;
+#endif
+               default:
+                       /* Pick a built-in if any, and hope for the best */
+                       fdtp = __dtb_start;
+                       break;
+               }
+       }
        fdtp = phys_to_virt(__pa(fdtp));
        early_init_devtree(fdtp);
 }
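
plat_mem_setup() above now chooses between the built-in EVP and SVP device trees by looking at the implementation field of the processor ID when the firmware did not hand over a DTB pointer. The selection pattern in isolation, with stand-in PRID values rather than the kernel's definitions:

/* Pick a built-in DTB name from the processor ID. The PRID constants
 * here are placeholders for PRID_IMP_NETLOGIC_XLP3XX/XLP8XX. */
#include <stdio.h>

#define PRID_XLP3XX	0x1100		/* placeholder value */
#define PRID_XLP8XX	0x1000		/* placeholder value */

static const char *pick_dtb(unsigned int processor_id)
{
	switch (processor_id & 0xff00) {
	case PRID_XLP3XX:
		return "xlp_svp.dtb";
	case PRID_XLP8XX:
		return "xlp_evp.dtb";
	default:
		return "generic built-in DTB";
	}
}

int main(void)
{
	printf("%s\n", pick_dtb(0x1105));
	return 0;
}
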
index 1d0b66c62fd1beed8f4f601a380170bcb16c17df..9c401dd78337384d1d95050c26ad3b38935d3f50 100644 (file)
 #include <asm/netlogic/haldefs.h>
 #include <asm/netlogic/xlp-hal/iomap.h>
 #include <asm/netlogic/xlp-hal/xlp.h>
-#include <asm/netlogic/xlp-hal/usb.h>
+
+/*
+ * USB glue logic registers, used only during initialization
+ */
+#define USB_CTL_0                      0x01
+#define USB_PHY_0                      0x0A
+#define USB_PHY_RESET                  0x01
+#define USB_PHY_PORT_RESET_0           0x10
+#define USB_PHY_PORT_RESET_1           0x20
+#define USB_CONTROLLER_RESET           0x01
+#define USB_INT_STATUS                 0x0E
+#define USB_INT_EN                     0x0F
+#define USB_PHY_INTERRUPT_EN           0x01
+#define USB_OHCI_INTERRUPT_EN          0x02
+#define USB_OHCI_INTERRUPT1_EN         0x04
+#define USB_OHCI_INTERRUPT2_EN         0x08
+#define USB_CTRL_INTERRUPT_EN          0x10
+
+#define nlm_read_usb_reg(b, r)                 nlm_read_reg(b, r)
+#define nlm_write_usb_reg(b, r, v)             nlm_write_reg(b, r, v)
+#define nlm_get_usb_pcibase(node, inst)                \
+       nlm_pcicfg_base(XLP_IO_USB_OFFSET(node, inst))
+#define nlm_get_usb_regbase(node, inst)                \
+       (nlm_get_usb_pcibase(node, inst) + XLP_IO_PCI_HDRSZ)
 
 static void nlm_usb_intr_en(int node, int port)
 {
@@ -99,23 +122,23 @@ static void nlm_usb_fixup_final(struct pci_dev *dev)
        dev->dev.coherent_dma_mask      = DMA_BIT_MASK(64);
        switch (dev->devfn) {
        case 0x10:
-              dev->irq = PIC_EHCI_0_IRQ;
-              break;
+               dev->irq = PIC_EHCI_0_IRQ;
+               break;
        case 0x11:
-              dev->irq = PIC_OHCI_0_IRQ;
-              break;
+               dev->irq = PIC_OHCI_0_IRQ;
+               break;
        case 0x12:
-              dev->irq = PIC_OHCI_1_IRQ;
-              break;
+               dev->irq = PIC_OHCI_1_IRQ;
+               break;
        case 0x13:
-              dev->irq = PIC_EHCI_1_IRQ;
-              break;
+               dev->irq = PIC_EHCI_1_IRQ;
+               break;
        case 0x14:
-              dev->irq = PIC_OHCI_2_IRQ;
-              break;
+               dev->irq = PIC_OHCI_2_IRQ;
+               break;
        case 0x15:
-              dev->irq = PIC_OHCI_3_IRQ;
-              break;
+               dev->irq = PIC_OHCI_3_IRQ;
+               break;
        }
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_NETLOGIC, PCI_DEVICE_ID_NLM_EHCI,
index 1fd361462c030b7fefd06c676f54129ff4fe63f6..e4b1140cdae060dca0de8bfdd6a5985b1429de58 100644 (file)
@@ -41,7 +41,7 @@ static int (*save_perf_irq)(void);
  * first hardware thread in the core for setup and init.
  * Skip CPUs with non-zero hardware thread id (4 hwt per core)
  */
-#ifdef CONFIG_CPU_XLR
+#if defined(CONFIG_CPU_XLR) && defined(CONFIG_SMP)
 #define oprofile_skip_cpu(c)   ((cpu_logical_map(c) & 0x3) != 0)
 #else
 #define oprofile_skip_cpu(c)   0
index 412ec025cf55643d528ca28116669a519f8ee182..18517dd0f7090987fe182df3d2a4dbe4e62842cf 100644 (file)
@@ -366,9 +366,9 @@ static int ar71xx_pci_probe(struct platform_device *pdev)
        if (!res)
                return -EINVAL;
 
-       apc->cfg_base = devm_request_and_ioremap(&pdev->dev, res);
-       if (!apc->cfg_base)
-               return -ENOMEM;
+       apc->cfg_base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(apc->cfg_base))
+               return PTR_ERR(apc->cfg_base);
 
        apc->irq = platform_get_irq(pdev, 0);
        if (apc->irq < 0)
index 8a0700d448fe45c65cb9063e37e7453dc1e05931..65ec032fa0b442367c93b70cf8599fb5bd03fdd1 100644 (file)
@@ -365,25 +365,25 @@ static int ar724x_pci_probe(struct platform_device *pdev)
        if (!res)
                return -EINVAL;
 
-       apc->ctrl_base = devm_request_and_ioremap(&pdev->dev, res);
-       if (apc->ctrl_base == NULL)
-               return -EBUSY;
+       apc->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(apc->ctrl_base))
+               return PTR_ERR(apc->ctrl_base);
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg_base");
        if (!res)
                return -EINVAL;
 
-       apc->devcfg_base = devm_request_and_ioremap(&pdev->dev, res);
-       if (!apc->devcfg_base)
-               return -EBUSY;
+       apc->devcfg_base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(apc->devcfg_base))
+               return PTR_ERR(apc->devcfg_base);
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "crp_base");
        if (!res)
                return -EINVAL;
 
-       apc->crp_base = devm_request_and_ioremap(&pdev->dev, res);
-       if (apc->crp_base == NULL)
-               return -EBUSY;
+       apc->crp_base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(apc->crp_base))
+               return PTR_ERR(apc->crp_base);
 
        apc->irq = platform_get_irq(pdev, 0);
        if (apc->irq < 0)
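
Both PCI controller probes above move from devm_request_and_ioremap(), which signalled failure with NULL, to devm_ioremap_resource(), which returns an encoded error pointer that must be checked with IS_ERR() and unwrapped with PTR_ERR(). A self-contained userspace sketch of that convention, with fake_ioremap() standing in for the real helper, looks roughly like this:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO       4095

/* Userspace model of the kernel's error-pointer convention: errno values
 * are encoded in the top 4095 addresses of the pointer range. */
static inline void *ERR_PTR(long error)        { return (void *)error; }
static inline long PTR_ERR(const void *ptr)    { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical stand-in for devm_ioremap_resource(): fails with -ENOMEM. */
static void *fake_ioremap(int fail)
{
        static char mapping[64];

        return fail ? ERR_PTR(-ENOMEM) : (void *)mapping;
}

int main(void)
{
        void *base = fake_ioremap(1);

        if (IS_ERR(base)) {             /* replaces the old NULL check */
                printf("probe would return %ld\n", PTR_ERR(base));
                return 0;
        }
        printf("mapped at %p\n", base);
        return 0;
}

The upshot is that the probe function can forward the exact error code (PTR_ERR) instead of collapsing every failure into -ENOMEM or -EBUSY, which is what the old NULL-based checks did.
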
index 88e781c6b5bab274b53f76c55b7f51c2356b1bc6..2eb954239bc5bbc3b1edac6325be44f287d76235 100644 (file)
@@ -121,11 +121,17 @@ void __iomem *pci_iospace_start;
 static void __init bcm63xx_reset_pcie(void)
 {
        u32 val;
+       u32 reg;
 
        /* enable SERDES */
-       val = bcm_misc_readl(MISC_SERDES_CTRL_REG);
+       if (BCMCPU_IS_6328())
+               reg = MISC_SERDES_CTRL_6328_REG;
+       else
+               reg = MISC_SERDES_CTRL_6362_REG;
+
+       val = bcm_misc_readl(reg);
        val |= SERDES_PCIE_EN | SERDES_PCIE_EXD_EN;
-       bcm_misc_writel(val, MISC_SERDES_CTRL_REG);
+       bcm_misc_writel(val, reg);
 
        /* reset the PCIe core */
        bcm63xx_core_set_reset(BCM63XX_RESET_PCIE, 1);
@@ -330,6 +336,7 @@ static int __init bcm63xx_pci_init(void)
 
        switch (bcm63xx_get_cpu_id()) {
        case BCM6328_CPU_ID:
+       case BCM6362_CPU_ID:
                return bcm63xx_register_pcie();
        case BCM6348_CPU_ID:
        case BCM6358_CPU_ID:
index 5bd9d8f468cc3582ab02114ff272b833d8893565..a01baff52cae0ab1ad652574843049c6cf640097 100644 (file)
 #include <asm/cacheflush.h>
 #include <asm/traps.h>
 
-#include <asm/mips-boards/prom.h>
 #include <asm/mips-boards/generic.h>
 #include <asm/mach-powertv/asic.h>
 
+#include "init.h"
+
 static int *_prom_envp;
 unsigned long _prom_memsize;
 
index b194c34ca9660eda535d77cc758f79868cdbbc82..c1a8bd0dbe4b38984958f4fbb498e392b0cda8eb 100644 (file)
@@ -23,4 +23,6 @@
 #ifndef _POWERTV_INIT_H
 #define _POWERTV_INIT_H
 extern unsigned long _prom_memsize;
+extern void prom_meminit(void);
+extern char *prom_getenv(char *name);
 #endif
index 6e5f1bdc59b5fa77215803d8e919b55afe3c081d..bc2f3ca22b413a27ed8f245495d5c8a38374d8d2 100644 (file)
@@ -29,7 +29,6 @@
 #include <asm/page.h>
 #include <asm/sections.h>
 
-#include <asm/mips-boards/prom.h>
 #include <asm/mach-powertv/asic.h>
 #include <asm/mach-powertv/ioremap.h>
 
index 820b8480f2223850f56169e6570e50fed0ae8de3..24689bff1039fe522e50e6d70c0eb0ab725e7849 100644 (file)
@@ -31,7 +31,6 @@
 #include <asm/bootinfo.h>
 #include <asm/irq.h>
 #include <asm/mips-boards/generic.h>
-#include <asm/mips-boards/prom.h>
 #include <asm/dma.h>
 #include <asm/asm.h>
 #include <asm/traps.h>
index a0b0197cab0aa205cdebbbb9e31fe6381da5aa07..026e823d871d34e8952ba22b064e59cd4b778fda 100644 (file)
@@ -6,12 +6,23 @@ choice
        help
          Select Ralink MIPS SoC type.
 
+       config SOC_RT288X
+               bool "RT288x"
+
        config SOC_RT305X
                bool "RT305x"
                select USB_ARCH_HAS_HCD
                select USB_ARCH_HAS_OHCI
                select USB_ARCH_HAS_EHCI
 
+       config SOC_RT3883
+               bool "RT3883"
+               select USB_ARCH_HAS_OHCI
+               select USB_ARCH_HAS_EHCI
+
+       config SOC_MT7620
+               bool "MT7620"
+
 endchoice
 
 choice
@@ -23,10 +34,22 @@ choice
        config DTB_RT_NONE
                bool "None"
 
+       config DTB_RT2880_EVAL
+               bool "RT2880 eval kit"
+               depends on SOC_RT288X
+
        config DTB_RT305X_EVAL
                bool "RT305x eval kit"
                depends on SOC_RT305X
 
+       config DTB_RT3883_EVAL
+               bool "RT3883 eval kit"
+               depends on SOC_RT3883
+
+       config DTB_MT7620A_EVAL
+               bool "MT7620A eval kit"
+               depends on SOC_MT7620
+
 endchoice
 
 endif
index 939757f0e71f61024bdc32d50102af1eb50c9c65..38cf1a880aaac21ca6b0e38b3561e1832aa5e4c3 100644 (file)
@@ -8,7 +8,10 @@
 
 obj-y := prom.o of.o reset.o clk.o irq.o
 
+obj-$(CONFIG_SOC_RT288X) += rt288x.o
 obj-$(CONFIG_SOC_RT305X) += rt305x.o
+obj-$(CONFIG_SOC_RT3883) += rt3883.o
+obj-$(CONFIG_SOC_MT7620) += mt7620.o
 
 obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
 
index 6babd65765e608a6d49a51a50a627da0938e23b8..cda4b6645c50587da40f9ad9ffa0357d27fe74ab 100644 (file)
@@ -4,7 +4,25 @@
 core-$(CONFIG_RALINK)          += arch/mips/ralink/
 cflags-$(CONFIG_RALINK)                += -I$(srctree)/arch/mips/include/asm/mach-ralink
 
+#
+# Ralink RT288x
+#
+load-$(CONFIG_SOC_RT288X)      += 0xffffffff88000000
+cflags-$(CONFIG_SOC_RT288X)    += -I$(srctree)/arch/mips/include/asm/mach-ralink/rt288x
+
 #
 # Ralink RT305x
 #
 load-$(CONFIG_SOC_RT305X)      += 0xffffffff80000000
+cflags-$(CONFIG_SOC_RT305X)    += -I$(srctree)/arch/mips/include/asm/mach-ralink/rt305x
+
+#
+# Ralink RT3883
+#
+load-$(CONFIG_SOC_RT3883)      += 0xffffffff80000000
+cflags-$(CONFIG_SOC_RT3883)    += -I$(srctree)/arch/mips/include/asm/mach-ralink/rt3883
+
+#
+# Ralink MT7620
+#
+load-$(CONFIG_SOC_MT7620)      += 0xffffffff80000000
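
The load addresses added above (0xffffffff88000000, 0xffffffff80000000) are the 32-bit KSEG0 link addresses written in their 64-bit sign-extended form, which is how these MIPS platform Makefiles conventionally spell them. A two-line check of that equivalence:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int32_t kseg0 = (int32_t)0x88000000u;           /* 32-bit KSEG0 link address */
        uint64_t ext = (uint64_t)(int64_t)kseg0;        /* sign-extend to 64 bits */

        printf("%#llx\n", (unsigned long long)ext);     /* prints 0xffffffff88000000 */
        return ext == 0xffffffff88000000ull ? 0 : 1;
}
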
index 300990313e1b3acdd85486c1a8b743c4f8e4b653..83144c3fc5acc32c0b619b7b7f4ac9949a4e4e1e 100644 (file)
@@ -22,13 +22,22 @@ struct ralink_pinmux {
        struct ralink_pinmux_grp *mode;
        struct ralink_pinmux_grp *uart;
        int uart_shift;
+       u32 uart_mask;
        void (*wdt_reset)(void);
+       struct ralink_pinmux_grp *pci;
+       int pci_shift;
+       u32 pci_mask;
 };
-extern struct ralink_pinmux gpio_pinmux;
+extern struct ralink_pinmux rt_gpio_pinmux;
 
 struct ralink_soc_info {
        unsigned char sys_type[RAMIPS_SYS_TYPE_LEN];
        unsigned char *compatible;
+
+       unsigned long mem_base;
+       unsigned long mem_size;
+       unsigned long mem_size_min;
+       unsigned long mem_size_max;
 };
 extern struct ralink_soc_info soc_info;
 
index 1a69fb300955391b30f56e64e93850769774d1aa..18194fa93e8065f0916c90ce706ae2cb4e072ae4 100644 (file)
@@ -1 +1,4 @@
+obj-$(CONFIG_DTB_RT2880_EVAL) := rt2880_eval.dtb.o
 obj-$(CONFIG_DTB_RT305X_EVAL) := rt3052_eval.dtb.o
+obj-$(CONFIG_DTB_RT3883_EVAL) := rt3883_eval.dtb.o
+obj-$(CONFIG_DTB_MT7620A_EVAL) := mt7620a_eval.dtb.o
diff --git a/arch/mips/ralink/dts/mt7620a.dtsi b/arch/mips/ralink/dts/mt7620a.dtsi
new file mode 100644 (file)
index 0000000..08bf24f
--- /dev/null
@@ -0,0 +1,58 @@
+/ {
+       #address-cells = <1>;
+       #size-cells = <1>;
+       compatible = "ralink,mtk7620a-soc";
+
+       cpus {
+               cpu@0 {
+                       compatible = "mips,mips24KEc";
+               };
+       };
+
+       cpuintc: cpuintc@0 {
+               #address-cells = <0>;
+               #interrupt-cells = <1>;
+               interrupt-controller;
+               compatible = "mti,cpu-interrupt-controller";
+       };
+
+       palmbus@10000000 {
+               compatible = "palmbus";
+               reg = <0x10000000 0x200000>;
+                ranges = <0x0 0x10000000 0x1FFFFF>;
+
+               #address-cells = <1>;
+               #size-cells = <1>;
+
+               sysc@0 {
+                       compatible = "ralink,mt7620a-sysc";
+                       reg = <0x0 0x100>;
+               };
+
+               intc: intc@200 {
+                       compatible = "ralink,mt7620a-intc", "ralink,rt2880-intc";
+                       reg = <0x200 0x100>;
+
+                       interrupt-controller;
+                       #interrupt-cells = <1>;
+
+                       interrupt-parent = <&cpuintc>;
+                       interrupts = <2>;
+               };
+
+               memc@300 {
+                       compatible = "ralink,mt7620a-memc", "ralink,rt3050-memc";
+                       reg = <0x300 0x100>;
+               };
+
+               uartlite@c00 {
+                       compatible = "ralink,mt7620a-uart", "ralink,rt2880-uart", "ns16550a";
+                       reg = <0xc00 0x100>;
+
+                       interrupt-parent = <&intc>;
+                       interrupts = <12>;
+
+                       reg-shift = <2>;
+               };
+       };
+};
diff --git a/arch/mips/ralink/dts/mt7620a_eval.dts b/arch/mips/ralink/dts/mt7620a_eval.dts
new file mode 100644 (file)
index 0000000..35eb874
--- /dev/null
@@ -0,0 +1,16 @@
+/dts-v1/;
+
+/include/ "mt7620a.dtsi"
+
+/ {
+       compatible = "ralink,mt7620a-eval-board", "ralink,mt7620a-soc";
+       model = "Ralink MT7620A evaluation board";
+
+       memory@0 {
+               reg = <0x0 0x2000000>;
+       };
+
+       chosen {
+               bootargs = "console=ttyS0,57600";
+       };
+};
diff --git a/arch/mips/ralink/dts/rt2880.dtsi b/arch/mips/ralink/dts/rt2880.dtsi
new file mode 100644 (file)
index 0000000..182afde
--- /dev/null
@@ -0,0 +1,58 @@
+/ {
+       #address-cells = <1>;
+       #size-cells = <1>;
+       compatible = "ralink,rt2880-soc";
+
+       cpus {
+               cpu@0 {
+                       compatible = "mips,mips4KEc";
+               };
+       };
+
+       cpuintc: cpuintc@0 {
+               #address-cells = <0>;
+               #interrupt-cells = <1>;
+               interrupt-controller;
+               compatible = "mti,cpu-interrupt-controller";
+       };
+
+       palmbus@300000 {
+               compatible = "palmbus";
+               reg = <0x300000 0x200000>;
+                ranges = <0x0 0x300000 0x1FFFFF>;
+
+               #address-cells = <1>;
+               #size-cells = <1>;
+
+               sysc@0 {
+                       compatible = "ralink,rt2880-sysc";
+                       reg = <0x0 0x100>;
+               };
+
+               intc: intc@200 {
+                       compatible = "ralink,rt2880-intc";
+                       reg = <0x200 0x100>;
+
+                       interrupt-controller;
+                       #interrupt-cells = <1>;
+
+                       interrupt-parent = <&cpuintc>;
+                       interrupts = <2>;
+               };
+
+               memc@300 {
+                       compatible = "ralink,rt2880-memc";
+                       reg = <0x300 0x100>;
+               };
+
+               uartlite@c00 {
+                       compatible = "ralink,rt2880-uart", "ns16550a";
+                       reg = <0xc00 0x100>;
+
+                       interrupt-parent = <&intc>;
+                       interrupts = <8>;
+
+                       reg-shift = <2>;
+               };
+       };
+};
diff --git a/arch/mips/ralink/dts/rt2880_eval.dts b/arch/mips/ralink/dts/rt2880_eval.dts
new file mode 100644 (file)
index 0000000..322d700
--- /dev/null
@@ -0,0 +1,46 @@
+/dts-v1/;
+
+/include/ "rt2880.dtsi"
+
+/ {
+       compatible = "ralink,rt2880-eval-board", "ralink,rt2880-soc";
+       model = "Ralink RT2880 evaluation board";
+
+       memory@0 {
+               reg = <0x8000000 0x2000000>;
+       };
+
+       chosen {
+               bootargs = "console=ttyS0,57600";
+       };
+
+       cfi@1f000000 {
+               compatible = "cfi-flash";
+               reg = <0x1f000000 0x400000>;
+
+               bank-width = <2>;
+               device-width = <2>;
+               #address-cells = <1>;
+               #size-cells = <1>;
+
+               partition@0 {
+                       label = "uboot";
+                       reg = <0x0 0x30000>;
+                       read-only;
+               };
+               partition@30000 {
+                       label = "uboot-env";
+                       reg = <0x30000 0x10000>;
+                       read-only;
+               };
+               partition@40000 {
+                       label = "calibration";
+                       reg = <0x40000 0x10000>;
+                       read-only;
+               };
+               partition@50000 {
+                       label = "linux";
+                       reg = <0x50000 0x3b0000>;
+               };
+       };
+};
index 069d0660e1ddaf844922ba6dffb7e7a838f7284d..ef7da1e227e61309c990548fa6ed93b259fe0040 100644 (file)
@@ -1,7 +1,7 @@
 / {
        #address-cells = <1>;
        #size-cells = <1>;
-       compatible = "ralink,rt3050-soc", "ralink,rt3052-soc";
+       compatible = "ralink,rt3050-soc", "ralink,rt3052-soc", "ralink,rt3350-soc";
 
        cpus {
                cpu@0 {
@@ -9,10 +9,6 @@
                };
        };
 
-       chosen {
-               bootargs = "console=ttyS0,57600 init=/init";
-       };
-
        cpuintc: cpuintc@0 {
                #address-cells = <0>;
                #interrupt-cells = <1>;
@@ -23,7 +19,7 @@
        palmbus@10000000 {
                compatible = "palmbus";
                reg = <0x10000000 0x200000>;
-                ranges = <0x0 0x10000000 0x1FFFFF>;
+               ranges = <0x0 0x10000000 0x1FFFFF>;
 
                #address-cells = <1>;
                #size-cells = <1>;
                        reg = <0x0 0x100>;
                };
 
-               timer@100 {
-                       compatible = "ralink,rt3052-wdt", "ralink,rt2880-wdt";
-                       reg = <0x100 0x100>;
-               };
-
                intc: intc@200 {
                        compatible = "ralink,rt3052-intc", "ralink,rt2880-intc";
                        reg = <0x200 0x100>;
                        reg = <0x300 0x100>;
                };
 
-               gpio0: gpio@600 {
-                       compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio";
-                       reg = <0x600 0x34>;
-
-                       gpio-controller;
-                       #gpio-cells = <2>;
-
-                       ralink,ngpio = <24>;
-                       ralink,regs = [ 00 04 08 0c
-                                       20 24 28 2c
-                                       30 34 ];
-               };
-
-               gpio1: gpio@638 {
-                       compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio";
-                       reg = <0x638 0x24>;
-
-                       gpio-controller;
-                       #gpio-cells = <2>;
-
-                       ralink,ngpio = <16>;
-                       ralink,regs = [ 00 04 08 0c
-                                       10 14 18 1c
-                                       20 24 ];
-               };
-
-               gpio2: gpio@660 {
-                       compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio";
-                       reg = <0x660 0x24>;
-
-                       gpio-controller;
-                       #gpio-cells = <2>;
-
-                       ralink,ngpio = <12>;
-                       ralink,regs = [ 00 04 08 0c
-                                       10 14 18 1c
-                                       20 24 ];
-               };
-
                uartlite@c00 {
                        compatible = "ralink,rt3052-uart", "ralink,rt2880-uart", "ns16550a";
                        reg = <0xc00 0x100>;
index 148a590bc4194784c369acef68c76b73d9c2240d..c18c9a84f4c4ee5794088f133332d3a391b834b7 100644 (file)
@@ -1,10 +1,8 @@
 /dts-v1/;
 
-/include/ "rt3050.dtsi"
+#include "rt3050.dtsi"
 
 / {
-       #address-cells = <1>;
-       #size-cells = <1>;
        compatible = "ralink,rt3052-eval-board", "ralink,rt3052-soc";
        model = "Ralink RT3052 evaluation board";
 
                reg = <0x0 0x2000000>;
        };
 
-       palmbus@10000000 {
-               sysc@0 {
-                       ralink,pinmmux = "uartlite", "spi";
-                       ralink,uartmux = "gpio";
-                       ralink,wdtmux = <0>;
-               };
+       chosen {
+               bootargs = "console=ttyS0,57600";
        };
 
        cfi@1f000000 {
diff --git a/arch/mips/ralink/dts/rt3883.dtsi b/arch/mips/ralink/dts/rt3883.dtsi
new file mode 100644 (file)
index 0000000..3b131dd
--- /dev/null
@@ -0,0 +1,58 @@
+/ {
+       #address-cells = <1>;
+       #size-cells = <1>;
+       compatible = "ralink,rt3883-soc";
+
+       cpus {
+               cpu@0 {
+                       compatible = "mips,mips74Kc";
+               };
+       };
+
+       cpuintc: cpuintc@0 {
+               #address-cells = <0>;
+               #interrupt-cells = <1>;
+               interrupt-controller;
+               compatible = "mti,cpu-interrupt-controller";
+       };
+
+       palmbus@10000000 {
+               compatible = "palmbus";
+               reg = <0x10000000 0x200000>;
+               ranges = <0x0 0x10000000 0x1FFFFF>;
+
+               #address-cells = <1>;
+               #size-cells = <1>;
+
+               sysc@0 {
+                       compatible = "ralink,rt3883-sysc", "ralink,rt3050-sysc";
+                       reg = <0x0 0x100>;
+               };
+
+               intc: intc@200 {
+                       compatible = "ralink,rt3883-intc", "ralink,rt2880-intc";
+                       reg = <0x200 0x100>;
+
+                       interrupt-controller;
+                       #interrupt-cells = <1>;
+
+                       interrupt-parent = <&cpuintc>;
+                       interrupts = <2>;
+               };
+
+               memc@300 {
+                       compatible = "ralink,rt3883-memc", "ralink,rt3050-memc";
+                       reg = <0x300 0x100>;
+               };
+
+               uartlite@c00 {
+                       compatible = "ralink,rt3883-uart", "ralink,rt2880-uart", "ns16550a";
+                       reg = <0xc00 0x100>;
+
+                       interrupt-parent = <&intc>;
+                       interrupts = <12>;
+
+                       reg-shift = <2>;
+               };
+       };
+};
diff --git a/arch/mips/ralink/dts/rt3883_eval.dts b/arch/mips/ralink/dts/rt3883_eval.dts
new file mode 100644 (file)
index 0000000..2fa6b33
--- /dev/null
@@ -0,0 +1,16 @@
+/dts-v1/;
+
+/include/ "rt3883.dtsi"
+
+/ {
+       compatible = "ralink,rt3883-eval-board", "ralink,rt3883-soc";
+       model = "Ralink RT3883 evaluation board";
+
+       memory@0 {
+               reg = <0x0 0x2000000>;
+       };
+
+       chosen {
+               bootargs = "console=ttyS0,57600";
+       };
+};
index c4ae47eb24abba22390902acd90e8f0430325162..b46d0419d09b52a3864e7f3f1bc0e2767ed97d4f 100644 (file)
 
 #include <asm/addrspace.h>
 
+#ifdef CONFIG_SOC_RT288X
+#define EARLY_UART_BASE         0x300c00
+#else
 #define EARLY_UART_BASE         0x10000c00
+#endif
 
 #define UART_REG_RX             0x00
 #define UART_REG_TX             0x04
index 6d054c5ec9ab03bf83950d152fdc173dbd0d8b56..320b1f1043fff108854ac7bab56ae1cf37e5699f 100644 (file)
@@ -31,6 +31,7 @@
 #define INTC_INT_GLOBAL                BIT(31)
 
 #define RALINK_CPU_IRQ_INTC    (MIPS_CPU_IRQ_BASE + 2)
+#define RALINK_CPU_IRQ_PCI     (MIPS_CPU_IRQ_BASE + 4)
 #define RALINK_CPU_IRQ_FE      (MIPS_CPU_IRQ_BASE + 5)
 #define RALINK_CPU_IRQ_WIFI    (MIPS_CPU_IRQ_BASE + 6)
 #define RALINK_CPU_IRQ_COUNTER (MIPS_CPU_IRQ_BASE + 7)
@@ -104,6 +105,9 @@ asmlinkage void plat_irq_dispatch(void)
        else if (pending & STATUSF_IP6)
                do_IRQ(RALINK_CPU_IRQ_WIFI);
 
+       else if (pending & STATUSF_IP4)
+               do_IRQ(RALINK_CPU_IRQ_PCI);
+
        else if (pending & STATUSF_IP2)
                do_IRQ(RALINK_CPU_IRQ_INTC);
 
@@ -162,6 +166,7 @@ static int __init intc_of_init(struct device_node *node,
        irq_set_chained_handler(irq, ralink_intc_irq_handler);
        irq_set_handler_data(irq, domain);
 
+       /* tell the kernel which irq is used for performance monitoring */
        cp0_perfcount_irq = irq_create_mapping(domain, 9);
 
        return 0;
diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
new file mode 100644 (file)
index 0000000..0018b1a
--- /dev/null
@@ -0,0 +1,234 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <asm/mipsregs.h>
+#include <asm/mach-ralink/ralink_regs.h>
+#include <asm/mach-ralink/mt7620.h>
+
+#include "common.h"
+
+/* does the board have sdram or ddram */
+static int dram_type;
+
+/* the pll dividers */
+static u32 mt7620_clk_divider[] = { 2, 3, 4, 8 };
+
+static struct ralink_pinmux_grp mode_mux[] = {
+       {
+               .name = "i2c",
+               .mask = MT7620_GPIO_MODE_I2C,
+               .gpio_first = 1,
+               .gpio_last = 2,
+       }, {
+               .name = "spi",
+               .mask = MT7620_GPIO_MODE_SPI,
+               .gpio_first = 3,
+               .gpio_last = 6,
+       }, {
+               .name = "uartlite",
+               .mask = MT7620_GPIO_MODE_UART1,
+               .gpio_first = 15,
+               .gpio_last = 16,
+       }, {
+               .name = "wdt",
+               .mask = MT7620_GPIO_MODE_WDT,
+               .gpio_first = 17,
+               .gpio_last = 17,
+       }, {
+               .name = "mdio",
+               .mask = MT7620_GPIO_MODE_MDIO,
+               .gpio_first = 22,
+               .gpio_last = 23,
+       }, {
+               .name = "rgmii1",
+               .mask = MT7620_GPIO_MODE_RGMII1,
+               .gpio_first = 24,
+               .gpio_last = 35,
+       }, {
+               .name = "spi refclk",
+               .mask = MT7620_GPIO_MODE_SPI_REF_CLK,
+               .gpio_first = 37,
+               .gpio_last = 39,
+       }, {
+               .name = "jtag",
+               .mask = MT7620_GPIO_MODE_JTAG,
+               .gpio_first = 40,
+               .gpio_last = 44,
+       }, {
+               /* shared lines with jtag */
+               .name = "ephy",
+               .mask = MT7620_GPIO_MODE_EPHY,
+               .gpio_first = 40,
+               .gpio_last = 44,
+       }, {
+               .name = "nand",
+               .mask = MT7620_GPIO_MODE_JTAG,
+               .gpio_first = 45,
+               .gpio_last = 59,
+       }, {
+               .name = "rgmii2",
+               .mask = MT7620_GPIO_MODE_RGMII2,
+               .gpio_first = 60,
+               .gpio_last = 71,
+       }, {
+               .name = "wled",
+               .mask = MT7620_GPIO_MODE_WLED,
+               .gpio_first = 72,
+               .gpio_last = 72,
+       }, {0}
+};
+
+static struct ralink_pinmux_grp uart_mux[] = {
+       {
+               .name = "uartf",
+               .mask = MT7620_GPIO_MODE_UARTF,
+               .gpio_first = 7,
+               .gpio_last = 14,
+       }, {
+               .name = "pcm uartf",
+               .mask = MT7620_GPIO_MODE_PCM_UARTF,
+               .gpio_first = 7,
+               .gpio_last = 14,
+       }, {
+               .name = "pcm i2s",
+               .mask = MT7620_GPIO_MODE_PCM_I2S,
+               .gpio_first = 7,
+               .gpio_last = 14,
+       }, {
+               .name = "i2s uartf",
+               .mask = MT7620_GPIO_MODE_I2S_UARTF,
+               .gpio_first = 7,
+               .gpio_last = 14,
+       }, {
+               .name = "pcm gpio",
+               .mask = MT7620_GPIO_MODE_PCM_GPIO,
+               .gpio_first = 11,
+               .gpio_last = 14,
+       }, {
+               .name = "gpio uartf",
+               .mask = MT7620_GPIO_MODE_GPIO_UARTF,
+               .gpio_first = 7,
+               .gpio_last = 10,
+       }, {
+               .name = "gpio i2s",
+               .mask = MT7620_GPIO_MODE_GPIO_I2S,
+               .gpio_first = 7,
+               .gpio_last = 10,
+       }, {
+               .name = "gpio",
+               .mask = MT7620_GPIO_MODE_GPIO,
+       }, {0}
+};
+
+struct ralink_pinmux rt_gpio_pinmux = {
+       .mode = mode_mux,
+       .uart = uart_mux,
+       .uart_shift = MT7620_GPIO_MODE_UART0_SHIFT,
+       .uart_mask = MT7620_GPIO_MODE_UART0_MASK,
+};
+
+void __init ralink_clk_init(void)
+{
+       unsigned long cpu_rate, sys_rate;
+       u32 c0 = rt_sysc_r32(SYSC_REG_CPLL_CONFIG0);
+       u32 c1 = rt_sysc_r32(SYSC_REG_CPLL_CONFIG1);
+       u32 swconfig = (c0 >> CPLL_SW_CONFIG_SHIFT) & CPLL_SW_CONFIG_MASK;
+       u32 cpu_clk = (c1 >> CPLL_CPU_CLK_SHIFT) & CPLL_CPU_CLK_MASK;
+
+       if (cpu_clk) {
+               cpu_rate = 480000000;
+       } else if (!swconfig) {
+               cpu_rate = 600000000;
+       } else {
+               u32 m = (c0 >> CPLL_MULT_RATIO_SHIFT) & CPLL_MULT_RATIO;
+               u32 d = (c0 >> CPLL_DIV_RATIO_SHIFT) & CPLL_DIV_RATIO;
+
+               cpu_rate = ((40 * (m + 24)) / mt7620_clk_divider[d]) * 1000000;
+       }
+
+       if (dram_type == SYSCFG0_DRAM_TYPE_SDRAM)
+               sys_rate = cpu_rate / 4;
+       else
+               sys_rate = cpu_rate / 3;
+
+       ralink_clk_add("cpu", cpu_rate);
+       ralink_clk_add("10000100.timer", 40000000);
+       ralink_clk_add("10000500.uart", 40000000);
+       ralink_clk_add("10000c00.uartlite", 40000000);
+}
+
+void __init ralink_of_remap(void)
+{
+       rt_sysc_membase = plat_of_remap_node("ralink,mt7620a-sysc");
+       rt_memc_membase = plat_of_remap_node("ralink,mt7620a-memc");
+
+       if (!rt_sysc_membase || !rt_memc_membase)
+               panic("Failed to remap core resources");
+}
+
+void prom_soc_init(struct ralink_soc_info *soc_info)
+{
+       void __iomem *sysc = (void __iomem *) KSEG1ADDR(MT7620_SYSC_BASE);
+       unsigned char *name = NULL;
+       u32 n0;
+       u32 n1;
+       u32 rev;
+       u32 cfg0;
+
+       n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0);
+       n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1);
+
+       if (n0 == MT7620N_CHIP_NAME0 && n1 == MT7620N_CHIP_NAME1) {
+               name = "MT7620N";
+               soc_info->compatible = "ralink,mt7620n-soc";
+       } else if (n0 == MT7620A_CHIP_NAME0 && n1 == MT7620A_CHIP_NAME1) {
+               name = "MT7620A";
+               soc_info->compatible = "ralink,mt7620a-soc";
+       } else {
+               panic("mt7620: unknown SoC, n0:%08x n1:%08x\n", n0, n1);
+       }
+
+       rev = __raw_readl(sysc + SYSC_REG_CHIP_REV);
+
+       snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
+               "Ralink %s ver:%u eco:%u",
+               name,
+               (rev >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK,
+               (rev & CHIP_REV_ECO_MASK));
+
+       cfg0 = __raw_readl(sysc + SYSC_REG_SYSTEM_CONFIG0);
+       dram_type = (cfg0 >> SYSCFG0_DRAM_TYPE_SHIFT) & SYSCFG0_DRAM_TYPE_MASK;
+
+       switch (dram_type) {
+       case SYSCFG0_DRAM_TYPE_SDRAM:
+               soc_info->mem_size_min = MT7620_SDRAM_SIZE_MIN;
+               soc_info->mem_size_max = MT7620_SDRAM_SIZE_MAX;
+               break;
+
+       case SYSCFG0_DRAM_TYPE_DDR1:
+               soc_info->mem_size_min = MT7620_DDR1_SIZE_MIN;
+               soc_info->mem_size_max = MT7620_DDR1_SIZE_MAX;
+               break;
+
+       case SYSCFG0_DRAM_TYPE_DDR2:
+               soc_info->mem_size_min = MT7620_DDR2_SIZE_MIN;
+               soc_info->mem_size_max = MT7620_DDR2_SIZE_MAX;
+               break;
+       default:
+               BUG();
+       }
+       soc_info->mem_base = MT7620_DRAM_BASE;
+}
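
In the MT7620 ralink_clk_init() above, the CPU rate is either one of the fixed 480/600 MHz selections or is derived from the PLL fields: a 40 MHz reference times (m + 24), divided by one of {2, 3, 4, 8}; the bus then runs at cpu/4 on SDRAM boards and cpu/3 otherwise. A standalone recomputation with made-up register field values (the CPLL_* shifts and masks are not reproduced here):

#include <stdio.h>

int main(void)
{
        unsigned int divider[] = { 2, 3, 4, 8 };
        unsigned int m = 6, d = 0;      /* invented field values for illustration */
        unsigned long cpu_rate, sys_rate;

        /* 40 MHz reference * (m + 24) / divider[d], as in the hunk above */
        cpu_rate = ((40 * (m + 24)) / divider[d]) * 1000000UL;

        /* DDR boards clock the system bus at cpu/3 */
        sys_rate = cpu_rate / 3;

        printf("cpu %lu Hz, sys %lu Hz\n", cpu_rate, sys_rate);
        return 0;
}
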
index 4165e70775be52dbf5df31d344204aa1b5ba40de..fb1569580def7c25a03a7f1348315c4e93e92e3c 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/io.h>
 #include <linux/clk.h>
 #include <linux/init.h>
+#include <linux/sizes.h>
 #include <linux/of_fdt.h>
 #include <linux/kernel.h>
 #include <linux/bootmem.h>
@@ -85,6 +86,14 @@ void __init plat_mem_setup(void)
         * parsed resulting in our memory appearing
         */
        __dt_setup_arch(&__dtb_start);
+
+       if (soc_info.mem_size)
+               add_memory_region(soc_info.mem_base, soc_info.mem_size,
+                                 BOOT_MEM_RAM);
+       else
+               detect_memory_region(soc_info.mem_base,
+                                    soc_info.mem_size_min * SZ_1M,
+                                    soc_info.mem_size_max * SZ_1M);
 }
 
 static int __init plat_of_setup(void)
diff --git a/arch/mips/ralink/rt288x.c b/arch/mips/ralink/rt288x.c
new file mode 100644 (file)
index 0000000..f87de1a
--- /dev/null
@@ -0,0 +1,143 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <asm/mipsregs.h>
+#include <asm/mach-ralink/ralink_regs.h>
+#include <asm/mach-ralink/rt288x.h>
+
+#include "common.h"
+
+static struct ralink_pinmux_grp mode_mux[] = {
+       {
+               .name = "i2c",
+               .mask = RT2880_GPIO_MODE_I2C,
+               .gpio_first = 1,
+               .gpio_last = 2,
+       }, {
+               .name = "spi",
+               .mask = RT2880_GPIO_MODE_SPI,
+               .gpio_first = 3,
+               .gpio_last = 6,
+       }, {
+               .name = "uartlite",
+               .mask = RT2880_GPIO_MODE_UART0,
+               .gpio_first = 7,
+               .gpio_last = 14,
+       }, {
+               .name = "jtag",
+               .mask = RT2880_GPIO_MODE_JTAG,
+               .gpio_first = 17,
+               .gpio_last = 21,
+       }, {
+               .name = "mdio",
+               .mask = RT2880_GPIO_MODE_MDIO,
+               .gpio_first = 22,
+               .gpio_last = 23,
+       }, {
+               .name = "sdram",
+               .mask = RT2880_GPIO_MODE_SDRAM,
+               .gpio_first = 24,
+               .gpio_last = 39,
+       }, {
+               .name = "pci",
+               .mask = RT2880_GPIO_MODE_PCI,
+               .gpio_first = 40,
+               .gpio_last = 71,
+       }, {0}
+};
+
+static void rt288x_wdt_reset(void)
+{
+       u32 t;
+
+       /* enable WDT reset output on pin SRAM_CS_N */
+       t = rt_sysc_r32(SYSC_REG_CLKCFG);
+       t |= CLKCFG_SRAM_CS_N_WDT;
+       rt_sysc_w32(t, SYSC_REG_CLKCFG);
+}
+
+struct ralink_pinmux rt_gpio_pinmux = {
+       .mode = mode_mux,
+       .wdt_reset = rt288x_wdt_reset,
+};
+
+void __init ralink_clk_init(void)
+{
+       unsigned long cpu_rate;
+       u32 t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG);
+       t = ((t >> SYSTEM_CONFIG_CPUCLK_SHIFT) & SYSTEM_CONFIG_CPUCLK_MASK);
+
+       switch (t) {
+       case SYSTEM_CONFIG_CPUCLK_250:
+               cpu_rate = 250000000;
+               break;
+       case SYSTEM_CONFIG_CPUCLK_266:
+               cpu_rate = 266666667;
+               break;
+       case SYSTEM_CONFIG_CPUCLK_280:
+               cpu_rate = 280000000;
+               break;
+       case SYSTEM_CONFIG_CPUCLK_300:
+               cpu_rate = 300000000;
+               break;
+       }
+
+       ralink_clk_add("cpu", cpu_rate);
+       ralink_clk_add("300100.timer", cpu_rate / 2);
+       ralink_clk_add("300120.watchdog", cpu_rate / 2);
+       ralink_clk_add("300500.uart", cpu_rate / 2);
+       ralink_clk_add("300c00.uartlite", cpu_rate / 2);
+       ralink_clk_add("400000.ethernet", cpu_rate / 2);
+}
+
+void __init ralink_of_remap(void)
+{
+       rt_sysc_membase = plat_of_remap_node("ralink,rt2880-sysc");
+       rt_memc_membase = plat_of_remap_node("ralink,rt2880-memc");
+
+       if (!rt_sysc_membase || !rt_memc_membase)
+               panic("Failed to remap core resources");
+}
+
+void prom_soc_init(struct ralink_soc_info *soc_info)
+{
+       void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT2880_SYSC_BASE);
+       const char *name;
+       u32 n0;
+       u32 n1;
+       u32 id;
+
+       n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0);
+       n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1);
+       id = __raw_readl(sysc + SYSC_REG_CHIP_ID);
+
+       if (n0 == RT2880_CHIP_NAME0 && n1 == RT2880_CHIP_NAME1) {
+               soc_info->compatible = "ralink,r2880-soc";
+               name = "RT2880";
+       } else {
+               panic("rt288x: unknown SoC, n0:%08x n1:%08x", n0, n1);
+       }
+
+       snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
+               "Ralink %s id:%u rev:%u",
+               name,
+               (id >> CHIP_ID_ID_SHIFT) & CHIP_ID_ID_MASK,
+               (id & CHIP_ID_REV_MASK));
+
+       soc_info->mem_base = RT2880_SDRAM_BASE;
+       soc_info->mem_size_min = RT2880_MEM_SIZE_MIN;
+       soc_info->mem_size_max = RT2880_MEM_SIZE_MAX;
+}
index 0a4bbdcf59d97d9d82b01fc8eb8ab9fffd5eba91..ca7ee3a33790fc074ed4788f8fb1e1412babe187 100644 (file)
@@ -22,7 +22,7 @@
 
 enum rt305x_soc_type rt305x_soc;
 
-struct ralink_pinmux_grp mode_mux[] = {
+static struct ralink_pinmux_grp mode_mux[] = {
        {
                .name = "i2c",
                .mask = RT305X_GPIO_MODE_I2C,
@@ -61,7 +61,7 @@ struct ralink_pinmux_grp mode_mux[] = {
        }, {0}
 };
 
-struct ralink_pinmux_grp uart_mux[] = {
+static struct ralink_pinmux_grp uart_mux[] = {
        {
                .name = "uartf",
                .mask = RT305X_GPIO_MODE_UARTF,
@@ -91,19 +91,19 @@ struct ralink_pinmux_grp uart_mux[] = {
                .name = "gpio uartf",
                .mask = RT305X_GPIO_MODE_GPIO_UARTF,
                .gpio_first = RT305X_GPIO_7,
-               .gpio_last = RT305X_GPIO_14,
+               .gpio_last = RT305X_GPIO_10,
        }, {
                .name = "gpio i2s",
                .mask = RT305X_GPIO_MODE_GPIO_I2S,
                .gpio_first = RT305X_GPIO_7,
-               .gpio_last = RT305X_GPIO_14,
+               .gpio_last = RT305X_GPIO_10,
        }, {
                .name = "gpio",
                .mask = RT305X_GPIO_MODE_GPIO,
        }, {0}
 };
 
-void rt305x_wdt_reset(void)
+static void rt305x_wdt_reset(void)
 {
        u32 t;
 
@@ -114,16 +114,53 @@ void rt305x_wdt_reset(void)
        rt_sysc_w32(t, SYSC_REG_SYSTEM_CONFIG);
 }
 
-struct ralink_pinmux gpio_pinmux = {
+struct ralink_pinmux rt_gpio_pinmux = {
        .mode = mode_mux,
        .uart = uart_mux,
        .uart_shift = RT305X_GPIO_MODE_UART0_SHIFT,
+       .uart_mask = RT305X_GPIO_MODE_UART0_MASK,
        .wdt_reset = rt305x_wdt_reset,
 };
 
+static unsigned long rt5350_get_mem_size(void)
+{
+       void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT305X_SYSC_BASE);
+       unsigned long ret;
+       u32 t;
+
+       t = __raw_readl(sysc + SYSC_REG_SYSTEM_CONFIG);
+       t = (t >> RT5350_SYSCFG0_DRAM_SIZE_SHIFT) &
+               RT5350_SYSCFG0_DRAM_SIZE_MASK;
+
+       switch (t) {
+       case RT5350_SYSCFG0_DRAM_SIZE_2M:
+               ret = 2;
+               break;
+       case RT5350_SYSCFG0_DRAM_SIZE_8M:
+               ret = 8;
+               break;
+       case RT5350_SYSCFG0_DRAM_SIZE_16M:
+               ret = 16;
+               break;
+       case RT5350_SYSCFG0_DRAM_SIZE_32M:
+               ret = 32;
+               break;
+       case RT5350_SYSCFG0_DRAM_SIZE_64M:
+               ret = 64;
+               break;
+       default:
+               panic("rt5350: invalid DRAM size: %u", t);
+               break;
+       }
+
+       return ret;
+}
+
 void __init ralink_clk_init(void)
 {
        unsigned long cpu_rate, sys_rate, wdt_rate, uart_rate;
+       unsigned long wmac_rate = 40000000;
+
        u32 t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG);
 
        if (soc_is_rt305x() || soc_is_rt3350()) {
@@ -176,11 +213,21 @@ void __init ralink_clk_init(void)
                BUG();
        }
 
+       if (soc_is_rt3352() || soc_is_rt5350()) {
+               u32 val = rt_sysc_r32(RT3352_SYSC_REG_SYSCFG0);
+
+               if (!(val & RT3352_CLKCFG0_XTAL_SEL))
+                       wmac_rate = 20000000;
+       }
+
        ralink_clk_add("cpu", cpu_rate);
        ralink_clk_add("10000b00.spi", sys_rate);
        ralink_clk_add("10000100.timer", wdt_rate);
+       ralink_clk_add("10000120.watchdog", wdt_rate);
        ralink_clk_add("10000500.uart", uart_rate);
        ralink_clk_add("10000c00.uartlite", uart_rate);
+       ralink_clk_add("10100000.ethernet", sys_rate);
+       ralink_clk_add("10180000.wmac", wmac_rate);
 }
 
 void __init ralink_of_remap(void)
@@ -239,4 +286,15 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
                name,
                (id >> CHIP_ID_ID_SHIFT) & CHIP_ID_ID_MASK,
                (id & CHIP_ID_REV_MASK));
+
+       soc_info->mem_base = RT305X_SDRAM_BASE;
+       if (soc_is_rt5350()) {
+               soc_info->mem_size = rt5350_get_mem_size();
+       } else if (soc_is_rt305x() || soc_is_rt3350()) {
+               soc_info->mem_size_min = RT305X_MEM_SIZE_MIN;
+               soc_info->mem_size_max = RT305X_MEM_SIZE_MAX;
+       } else if (soc_is_rt3352()) {
+               soc_info->mem_size_min = RT3352_MEM_SIZE_MIN;
+               soc_info->mem_size_max = RT3352_MEM_SIZE_MAX;
+       }
 }
diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c
new file mode 100644 (file)
index 0000000..b474ac2
--- /dev/null
@@ -0,0 +1,246 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <asm/mipsregs.h>
+#include <asm/mach-ralink/ralink_regs.h>
+#include <asm/mach-ralink/rt3883.h>
+
+#include "common.h"
+
+static struct ralink_pinmux_grp mode_mux[] = {
+       {
+               .name = "i2c",
+               .mask = RT3883_GPIO_MODE_I2C,
+               .gpio_first = RT3883_GPIO_I2C_SD,
+               .gpio_last = RT3883_GPIO_I2C_SCLK,
+       }, {
+               .name = "spi",
+               .mask = RT3883_GPIO_MODE_SPI,
+               .gpio_first = RT3883_GPIO_SPI_CS0,
+               .gpio_last = RT3883_GPIO_SPI_MISO,
+       }, {
+               .name = "uartlite",
+               .mask = RT3883_GPIO_MODE_UART1,
+               .gpio_first = RT3883_GPIO_UART1_TXD,
+               .gpio_last = RT3883_GPIO_UART1_RXD,
+       }, {
+               .name = "jtag",
+               .mask = RT3883_GPIO_MODE_JTAG,
+               .gpio_first = RT3883_GPIO_JTAG_TDO,
+               .gpio_last = RT3883_GPIO_JTAG_TCLK,
+       }, {
+               .name = "mdio",
+               .mask = RT3883_GPIO_MODE_MDIO,
+               .gpio_first = RT3883_GPIO_MDIO_MDC,
+               .gpio_last = RT3883_GPIO_MDIO_MDIO,
+       }, {
+               .name = "ge1",
+               .mask = RT3883_GPIO_MODE_GE1,
+               .gpio_first = RT3883_GPIO_GE1_TXD0,
+               .gpio_last = RT3883_GPIO_GE1_RXCLK,
+       }, {
+               .name = "ge2",
+               .mask = RT3883_GPIO_MODE_GE2,
+               .gpio_first = RT3883_GPIO_GE2_TXD0,
+               .gpio_last = RT3883_GPIO_GE2_RXCLK,
+       }, {
+               .name = "pci",
+               .mask = RT3883_GPIO_MODE_PCI,
+               .gpio_first = RT3883_GPIO_PCI_AD0,
+               .gpio_last = RT3883_GPIO_PCI_AD31,
+       }, {
+               .name = "lna a",
+               .mask = RT3883_GPIO_MODE_LNA_A,
+               .gpio_first = RT3883_GPIO_LNA_PE_A0,
+               .gpio_last = RT3883_GPIO_LNA_PE_A2,
+       }, {
+               .name = "lna g",
+               .mask = RT3883_GPIO_MODE_LNA_G,
+               .gpio_first = RT3883_GPIO_LNA_PE_G0,
+               .gpio_last = RT3883_GPIO_LNA_PE_G2,
+       }, {0}
+};
+
+static struct ralink_pinmux_grp uart_mux[] = {
+       {
+               .name = "uartf",
+               .mask = RT3883_GPIO_MODE_UARTF,
+               .gpio_first = RT3883_GPIO_7,
+               .gpio_last = RT3883_GPIO_14,
+       }, {
+               .name = "pcm uartf",
+               .mask = RT3883_GPIO_MODE_PCM_UARTF,
+               .gpio_first = RT3883_GPIO_7,
+               .gpio_last = RT3883_GPIO_14,
+       }, {
+               .name = "pcm i2s",
+               .mask = RT3883_GPIO_MODE_PCM_I2S,
+               .gpio_first = RT3883_GPIO_7,
+               .gpio_last = RT3883_GPIO_14,
+       }, {
+               .name = "i2s uartf",
+               .mask = RT3883_GPIO_MODE_I2S_UARTF,
+               .gpio_first = RT3883_GPIO_7,
+               .gpio_last = RT3883_GPIO_14,
+       }, {
+               .name = "pcm gpio",
+               .mask = RT3883_GPIO_MODE_PCM_GPIO,
+               .gpio_first = RT3883_GPIO_11,
+               .gpio_last = RT3883_GPIO_14,
+       }, {
+               .name = "gpio uartf",
+               .mask = RT3883_GPIO_MODE_GPIO_UARTF,
+               .gpio_first = RT3883_GPIO_7,
+               .gpio_last = RT3883_GPIO_10,
+       }, {
+               .name = "gpio i2s",
+               .mask = RT3883_GPIO_MODE_GPIO_I2S,
+               .gpio_first = RT3883_GPIO_7,
+               .gpio_last = RT3883_GPIO_10,
+       }, {
+               .name = "gpio",
+               .mask = RT3883_GPIO_MODE_GPIO,
+       }, {0}
+};
+
+static struct ralink_pinmux_grp pci_mux[] = {
+       {
+               .name = "pci-dev",
+               .mask = 0,
+               .gpio_first = RT3883_GPIO_PCI_AD0,
+               .gpio_last = RT3883_GPIO_PCI_AD31,
+       }, {
+               .name = "pci-host2",
+               .mask = 1,
+               .gpio_first = RT3883_GPIO_PCI_AD0,
+               .gpio_last = RT3883_GPIO_PCI_AD31,
+       }, {
+               .name = "pci-host1",
+               .mask = 2,
+               .gpio_first = RT3883_GPIO_PCI_AD0,
+               .gpio_last = RT3883_GPIO_PCI_AD31,
+       }, {
+               .name = "pci-fnc",
+               .mask = 3,
+               .gpio_first = RT3883_GPIO_PCI_AD0,
+               .gpio_last = RT3883_GPIO_PCI_AD31,
+       }, {
+               .name = "pci-gpio",
+               .mask = 7,
+               .gpio_first = RT3883_GPIO_PCI_AD0,
+               .gpio_last = RT3883_GPIO_PCI_AD31,
+       }, {0}
+};
+
+static void rt3883_wdt_reset(void)
+{
+       u32 t;
+
+       /* enable WDT reset output on GPIO 2 */
+       t = rt_sysc_r32(RT3883_SYSC_REG_SYSCFG1);
+       t |= RT3883_SYSCFG1_GPIO2_AS_WDT_OUT;
+       rt_sysc_w32(t, RT3883_SYSC_REG_SYSCFG1);
+}
+
+struct ralink_pinmux rt_gpio_pinmux = {
+       .mode = mode_mux,
+       .uart = uart_mux,
+       .uart_shift = RT3883_GPIO_MODE_UART0_SHIFT,
+       .uart_mask = RT3883_GPIO_MODE_UART0_MASK,
+       .wdt_reset = rt3883_wdt_reset,
+       .pci = pci_mux,
+       .pci_shift = RT3883_GPIO_MODE_PCI_SHIFT,
+       .pci_mask = RT3883_GPIO_MODE_PCI_MASK,
+};
+
+void __init ralink_clk_init(void)
+{
+       unsigned long cpu_rate, sys_rate;
+       u32 syscfg0;
+       u32 clksel;
+       u32 ddr2;
+
+       syscfg0 = rt_sysc_r32(RT3883_SYSC_REG_SYSCFG0);
+       clksel = ((syscfg0 >> RT3883_SYSCFG0_CPUCLK_SHIFT) &
+               RT3883_SYSCFG0_CPUCLK_MASK);
+       ddr2 = syscfg0 & RT3883_SYSCFG0_DRAM_TYPE_DDR2;
+
+       switch (clksel) {
+       case RT3883_SYSCFG0_CPUCLK_250:
+               cpu_rate = 250000000;
+               sys_rate = (ddr2) ? 125000000 : 83000000;
+               break;
+       case RT3883_SYSCFG0_CPUCLK_384:
+               cpu_rate = 384000000;
+               sys_rate = (ddr2) ? 128000000 : 96000000;
+               break;
+       case RT3883_SYSCFG0_CPUCLK_480:
+               cpu_rate = 480000000;
+               sys_rate = (ddr2) ? 160000000 : 120000000;
+               break;
+       case RT3883_SYSCFG0_CPUCLK_500:
+               cpu_rate = 500000000;
+               sys_rate = (ddr2) ? 166000000 : 125000000;
+               break;
+       }
+
+       ralink_clk_add("cpu", cpu_rate);
+       ralink_clk_add("10000100.timer", sys_rate);
+       ralink_clk_add("10000120.watchdog", sys_rate);
+       ralink_clk_add("10000500.uart", 40000000);
+       ralink_clk_add("10000b00.spi", sys_rate);
+       ralink_clk_add("10000c00.uartlite", 40000000);
+       ralink_clk_add("10100000.ethernet", sys_rate);
+}
+
+void __init ralink_of_remap(void)
+{
+       rt_sysc_membase = plat_of_remap_node("ralink,rt3883-sysc");
+       rt_memc_membase = plat_of_remap_node("ralink,rt3883-memc");
+
+       if (!rt_sysc_membase || !rt_memc_membase)
+               panic("Failed to remap core resources");
+}
+
+void prom_soc_init(struct ralink_soc_info *soc_info)
+{
+       void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT3883_SYSC_BASE);
+       const char *name;
+       u32 n0;
+       u32 n1;
+       u32 id;
+
+       n0 = __raw_readl(sysc + RT3883_SYSC_REG_CHIPID0_3);
+       n1 = __raw_readl(sysc + RT3883_SYSC_REG_CHIPID4_7);
+       id = __raw_readl(sysc + RT3883_SYSC_REG_REVID);
+
+       if (n0 == RT3883_CHIP_NAME0 && n1 == RT3883_CHIP_NAME1) {
+               soc_info->compatible = "ralink,rt3883-soc";
+               name = "RT3883";
+       } else {
+               panic("rt3883: unknown SoC, n0:%08x n1:%08x", n0, n1);
+       }
+
+       snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
+               "Ralink %s ver:%u eco:%u",
+               name,
+               (id >> RT3883_REVID_VER_ID_SHIFT) & RT3883_REVID_VER_ID_MASK,
+               (id & RT3883_REVID_ECO_ID_MASK));
+
+       soc_info->mem_base = RT3883_SDRAM_BASE;
+       soc_info->mem_size_min = RT3883_MEM_SIZE_MIN;
+       soc_info->mem_size_max = RT3883_MEM_SIZE_MAX;
+}
index 1d1919a44e88c350685866d8420b24fa9e168dc9..7a53b1e28a93dfa512da4e5a6dff6f49e9ae9baa 100644 (file)
@@ -114,7 +114,7 @@ void __init replicate_kernel_text()
  * data structures on the first couple of pages of the first slot of each
  * node. If this is the case, getfirstfree(node) > getslotstart(node, 0).
  */
-pfn_t node_getfirstfree(cnodeid_t cnode)
+unsigned long node_getfirstfree(cnodeid_t cnode)
 {
        unsigned long loadbase = REP_BASE;
        nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
index 5f2bddb1860e11c1b32035593b3376b758c37570..1230f56429d7334ea418f41921e709a9b737586a 100644 (file)
@@ -255,14 +255,14 @@ static void __init dump_topology(void)
        }
 }
 
-static pfn_t __init slot_getbasepfn(cnodeid_t cnode, int slot)
+static unsigned long __init slot_getbasepfn(cnodeid_t cnode, int slot)
 {
        nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
 
-       return ((pfn_t)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT);
+       return ((unsigned long)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT);
 }
 
-static pfn_t __init slot_psize_compute(cnodeid_t node, int slot)
+static unsigned long __init slot_psize_compute(cnodeid_t node, int slot)
 {
        nasid_t nasid;
        lboard_t *brd;
@@ -353,7 +353,7 @@ static void __init mlreset(void)
 
 static void __init szmem(void)
 {
-       pfn_t slot_psize, slot0sz = 0, nodebytes;       /* Hack to detect problem configs */
+       unsigned long slot_psize, slot0sz = 0, nodebytes;       /* Hack to detect problem configs */
        int slot;
        cnodeid_t node;
 
@@ -390,10 +390,10 @@ static void __init szmem(void)
 
 static void __init node_mem_init(cnodeid_t node)
 {
-       pfn_t slot_firstpfn = slot_getbasepfn(node, 0);
-       pfn_t slot_freepfn = node_getfirstfree(node);
+       unsigned long slot_firstpfn = slot_getbasepfn(node, 0);
+       unsigned long slot_freepfn = node_getfirstfree(node);
        unsigned long bootmap_size;
-       pfn_t start_pfn, end_pfn;
+       unsigned long start_pfn, end_pfn;
 
        get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
 
@@ -467,7 +467,7 @@ void __init paging_init(void)
        pagetable_init();
 
        for_each_online_node(node) {
-               pfn_t start_pfn, end_pfn;
+               unsigned long start_pfn, end_pfn;
 
                get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
 
index fff58ac176f36328914c06801234f1f31c38389c..2e21b761cb9c771586797a50f5eda5666368a81e 100644 (file)
@@ -69,7 +69,7 @@ static void rt_set_mode(enum clock_event_mode mode,
        /* Nothing to do ...  */
 }
 
-int rt_timer_irq;
+unsigned int rt_timer_irq;
 
 static DEFINE_PER_CPU(struct clock_event_device, hub_rt_clockevent);
 static DEFINE_PER_CPU(char [11], hub_rt_name);
index f517e08e7f0d4727b771db06429875b140180722..a134ff4da12ee1e4ad39a78e514328f578573ca3 100644 (file)
@@ -59,11 +59,3 @@ asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23,
        current->comm, current->pid, r20);
     return -ENOSYS;
 }
-
-asmlinkage long compat_sys_fanotify_mark(int fan_fd, int flags, u32 mask_hi,
-                                        u32 mask_lo, int fd,
-                                        const char __user *pathname)
-{
-       return sys_fanotify_mark(fan_fd, flags, ((u64)mask_hi << 32) | mask_lo,
-                                fd, pathname);
-}
index 3fe5259e2fea872be5da2e333427a935f8a012d0..915fbb4fc2fe4e7534dbf78eb39317013b03b014 100644 (file)
@@ -150,7 +150,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
        CURRENT_THREAD_INFO(r11, r1)
        ld      r10,TI_FLAGS(r11)
        andi.   r11,r10,_TIF_SYSCALL_T_OR_A
-       bne-    syscall_dotrace
+       bne     syscall_dotrace
 .Lsyscall_dotrace_cont:
        cmpldi  0,r0,NR_syscalls
        bge-    syscall_enosys
index cd6e19d263b32ca403f49fa4409ca3fab6f88411..8a285876aef8a8e667091b5560ec1aa8756f5bc7 100644 (file)
@@ -126,11 +126,3 @@ asmlinkage long compat_sys_sync_file_range2(int fd, unsigned int flags,
 
        return sys_sync_file_range(fd, offset, nbytes, flags);
 }
-
-asmlinkage long compat_sys_fanotify_mark(int fanotify_fd, unsigned int flags,
-                                        unsigned mask_hi, unsigned mask_lo,
-                                        int dfd, const char __user *pathname)
-{
-       u64 mask = ((u64)mask_hi << 32) | mask_lo;
-       return sys_fanotify_mark(fanotify_fd, flags, mask, dfd, pathname);
-}
index 2d72d9e96c153b360ae5eb13d68c7d0370b9139e..9cb1b975b3532f3cc6237ab1cd168146f99a9904 100644 (file)
@@ -793,10 +793,6 @@ ENTRY(sys32_stime_wrapper)
        llgtr   %r2,%r2                 # long *
        jg      compat_sys_stime        # branch to system call
 
-ENTRY(sys32_sysctl_wrapper)
-       llgtr   %r2,%r2                 # struct compat_sysctl_args *
-       jg      compat_sys_sysctl
-
 ENTRY(sys32_fstat64_wrapper)
        llgfr   %r2,%r2                 # unsigned long
        llgtr   %r3,%r3                 # struct stat64 *
@@ -1349,15 +1345,6 @@ ENTRY(sys_fanotify_init_wrapper)
        llgfr   %r3,%r3                 # unsigned int
        jg      sys_fanotify_init       # branch to system call
 
-ENTRY(sys_fanotify_mark_wrapper)
-       lgfr    %r2,%r2                 # int
-       llgfr   %r3,%r3                 # unsigned int
-       sllg    %r4,%r4,32              # get high word of 64bit mask
-       lr      %r4,%r5                 # get low word of 64bit mask
-       llgfr   %r5,%r6                 # unsigned int
-       llgt    %r6,164(%r15)           # char *
-       jg      sys_fanotify_mark       # branch to system call
-
 ENTRY(sys_prlimit64_wrapper)
        lgfr    %r2,%r2                 # pid_t
        llgfr   %r3,%r3                 # unsigned int
index 9f214e992eed2bbbdfbd2bb77a278a92de2dddf5..913410bd74a335c0693637ccb69e49cb89fd63c1 100644 (file)
@@ -157,7 +157,7 @@ SYSCALL(sys_readv,sys_readv,compat_sys_readv_wrapper)               /* 145 */
 SYSCALL(sys_writev,sys_writev,compat_sys_writev_wrapper)
 SYSCALL(sys_getsid,sys_getsid,sys32_getsid_wrapper)
 SYSCALL(sys_fdatasync,sys_fdatasync,sys32_fdatasync_wrapper)
-SYSCALL(sys_sysctl,sys_sysctl,sys32_sysctl_wrapper)
+SYSCALL(sys_sysctl,sys_sysctl,compat_sys_sysctl)
 SYSCALL(sys_mlock,sys_mlock,sys32_mlock_wrapper)               /* 150 */
 SYSCALL(sys_munlock,sys_munlock,sys32_munlock_wrapper)
 SYSCALL(sys_mlockall,sys_mlockall,sys32_mlockall_wrapper)
@@ -341,7 +341,7 @@ SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev)
 SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo) /* 330 */
 SYSCALL(sys_perf_event_open,sys_perf_event_open,sys_perf_event_open_wrapper)
 SYSCALL(sys_fanotify_init,sys_fanotify_init,sys_fanotify_init_wrapper)
-SYSCALL(sys_fanotify_mark,sys_fanotify_mark,sys_fanotify_mark_wrapper)
+SYSCALL(sys_fanotify_mark,sys_fanotify_mark,compat_sys_fanotify_mark)
 SYSCALL(sys_prlimit64,sys_prlimit64,sys_prlimit64_wrapper)
 SYSCALL(sys_name_to_handle_at,sys_name_to_handle_at,sys_name_to_handle_at_wrapper) /* 335 */
 SYSCALL(sys_open_by_handle_at,sys_open_by_handle_at,compat_sys_open_by_handle_at)
index 2e680b5245c9b4cc0b2b8941414e11ce540b424d..f7c72b6efc27556cd2e2de7a74539b1ba21831ce 100644 (file)
@@ -239,15 +239,6 @@ do_sys_accept4: /* sys_accept4(int, struct sockaddr *, int *, int) */
        nop
        nop
 
-       .globl          sys32_fanotify_mark
-sys32_fanotify_mark:
-       sethi           %hi(sys_fanotify_mark), %g1
-       sllx            %o2, 32, %o2
-       or              %o2, %o3, %o2
-       mov             %o4, %o3
-       jmpl            %g1 + %lo(sys_fanotify_mark), %g0
-        mov            %o5, %o4
-
        .section        __ex_table,"a"
        .align          4
        .word           1b, __retl_efault, 2b, __retl_efault
index 8fd9320802153c5d4ece190b8dd535b8a3747679..6d81597064b6b5c7efa0861fc6c5c230e3c4984f 100644 (file)
@@ -84,7 +84,7 @@ sys_call_table32:
        .word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1
 /*320*/        .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv
        .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init
-/*330*/        .word sys32_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
+/*330*/        .word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
        .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
 /*340*/        .word sys_kern_features, sys_kcmp, sys_finit_module
 
index cfe79c9529b353f2d132cd004e336b578f2faee4..f9e86253931407bcf851c6e84f0c007480ce6cf1 100644 (file)
 #include <asm/syscalls.h>
 #include <asm/cacheflush.h>
 
-/* Note: used by the compat code even in 64-bit Linux. */
-SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
-               unsigned long, prot, unsigned long, flags,
-               unsigned long, fd, unsigned long, off_4k)
-{
-       return sys_mmap_pgoff(addr, len, prot, flags, fd,
-                             off_4k);
-}
-
 /* Provide the actual syscall number to call mapping. */
 #undef __SYSCALL
 #define __SYSCALL(nr, call)    [nr] = (call),
 
+#define sys_mmap2 sys_mmap_pgoff
 /* Note that we don't include <linux/unistd.h> but <asm/unistd.h> */
 void *sys_call_table[__NR_syscalls] = {
        [0 ... __NR_syscalls-1] = sys_ni_syscall,
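
Two things are worth noting in this hunk: the open-coded mmap2 wrapper removed above was a pure pass-through to sys_mmap_pgoff, so a plain #define alias is enough once the table is generated, and the table itself relies on GCC's range designated initializers: every slot defaults to sys_ni_syscall, and each __SYSCALL(nr, call) expansion then overrides its own slot. A small standalone illustration of that initializer pattern (the function names and NR_CALLS are made up; needs GCC, since the [a ... b] range designator is a GNU extension):

    #include <stdio.h>

    #define NR_CALLS 8

    static int do_default(void) { return -1; }      /* stands in for sys_ni_syscall */
    static int do_read(void)    { return 0; }
    static int do_write(void)   { return 1; }

    /* same shape as the kernel table: default every entry, then override a few */
    static int (*const call_table[NR_CALLS])(void) = {
            [0 ... NR_CALLS - 1] = do_default,
            [3] = do_read,
            [4] = do_write,
    };

    int main(void)
    {
            printf("%d %d %d\n", call_table[0](), call_table[3](), call_table[4]());
            return 0;
    }
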
index 4e4907c67d92d0940270de5c7c55c731681afaf5..8e0ceecdc95790d7a53eb3fe5bc1c3867bcb9e7f 100644 (file)
@@ -243,12 +243,3 @@ asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_lo,
        return sys_fallocate(fd, mode, ((u64)offset_hi << 32) | offset_lo,
                             ((u64)len_hi << 32) | len_lo);
 }
-
-asmlinkage long sys32_fanotify_mark(int fanotify_fd, unsigned int flags,
-                                   u32 mask_lo, u32 mask_hi,
-                                   int fd, const char  __user *pathname)
-{
-       return sys_fanotify_mark(fanotify_fd, flags,
-                                ((u64)mask_hi << 32) | mask_lo,
-                                fd, pathname);
-}
index 0ef202e232d642e97f9bb2c5a389a35ef10cc99d..82c34ee25a651760c9950ce6c54625896fd9ea2f 100644 (file)
@@ -50,9 +50,6 @@ asmlinkage long sys32_fallocate(int, int, unsigned,
 asmlinkage long sys32_sigreturn(void);
 asmlinkage long sys32_rt_sigreturn(void);
 
-asmlinkage long sys32_fanotify_mark(int, unsigned int, u32, u32, int,
-                                   const char __user *);
-
 #endif /* CONFIG_COMPAT */
 
 #endif /* _ASM_X86_SYS_IA32_H */
index 5f87b35fd2ef5d6a9cc4eb6e7cdf1c822d4e679d..2917a6452c496e20d254589f789e8ad0f499f6dc 100644 (file)
@@ -37,8 +37,8 @@ asmlinkage long sys_get_thread_area(struct user_desc __user *);
 unsigned long sys_sigreturn(void);
 
 /* kernel/vm86_32.c */
-int sys_vm86old(struct vm86_struct __user *);
-int sys_vm86(unsigned long, unsigned long);
+asmlinkage long sys_vm86old(struct vm86_struct __user *);
+asmlinkage long sys_vm86(unsigned long, unsigned long);
 
 #else /* CONFIG_X86_32 */
 
index b3a4866661c5c426efb0504294a3c7228ccd107a..2af848dfa75424b7798924dd9a91524af7f088e3 100644 (file)
 #define MSR_CORE_C6_RESIDENCY          0x000003fd
 #define MSR_CORE_C7_RESIDENCY          0x000003fe
 #define MSR_PKG_C2_RESIDENCY           0x0000060d
+#define MSR_PKG_C8_RESIDENCY           0x00000630
+#define MSR_PKG_C9_RESIDENCY           0x00000631
+#define MSR_PKG_C10_RESIDENCY          0x00000632
 
 /* Run Time Average Power Limiting (RAPL) Interface */
 
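
The three new definitions expose the package C8/C9/C10 residency counters (MSRs 0x630-0x632) that pair with the Haswell idle states added to intel_idle further down in this diff. Purely as an illustration, not part of this patch: with the standard msr driver loaded, such a counter can be read from userspace by pread()ing the MSR address as a file offset on /dev/cpu/N/msr (requires root):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define MSR_PKG_C8_RESIDENCY 0x630

    int main(void)
    {
            uint64_t val;
            int fd = open("/dev/cpu/0/msr", O_RDONLY);  /* msr driver, CPU 0 */

            if (fd < 0) {
                    perror("open /dev/cpu/0/msr");
                    return 1;
            }
            /* the msr device interprets the file offset as the MSR address */
            if (pread(fd, &val, sizeof(val), MSR_PKG_C8_RESIDENCY) != sizeof(val)) {
                    perror("pread");
                    close(fd);
                    return 1;
            }
            printf("PKG C8 residency: %llu\n", (unsigned long long)val);
            close(fd);
            return 0;
    }
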
index 1cf5766dde169448e109101e981632bb6a6407a5..e8edcf52e06911fe5446e543f40368ea69114225 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/capability.h>
 #include <linux/errno.h>
 #include <linux/interrupt.h>
+#include <linux/syscalls.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/signal.h>
@@ -48,7 +49,6 @@
 #include <asm/io.h>
 #include <asm/tlbflush.h>
 #include <asm/irq.h>
-#include <asm/syscalls.h>
 
 /*
  * Known problems:
@@ -202,36 +202,32 @@ out:
 static int do_vm86_irq_handling(int subfunction, int irqnumber);
 static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk);
 
-int sys_vm86old(struct vm86_struct __user *v86)
+SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
 {
        struct kernel_vm86_struct info; /* declare this _on top_,
                                         * this avoids wasting of stack space.
                                         * This remains on the stack until we
                                         * return to 32 bit user space.
                                         */
-       struct task_struct *tsk;
-       int tmp, ret = -EPERM;
+       struct task_struct *tsk = current;
+       int tmp;
 
-       tsk = current;
        if (tsk->thread.saved_sp0)
-               goto out;
+               return -EPERM;
        tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
                                       offsetof(struct kernel_vm86_struct, vm86plus) -
                                       sizeof(info.regs));
-       ret = -EFAULT;
        if (tmp)
-               goto out;
+               return -EFAULT;
        memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus);
        info.regs32 = current_pt_regs();
        tsk->thread.vm86_info = v86;
        do_sys_vm86(&info, tsk);
-       ret = 0;        /* we never return here */
-out:
-       return ret;
+       return 0;       /* we never return here */
 }
 
 
-int sys_vm86(unsigned long cmd, unsigned long arg)
+SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
 {
        struct kernel_vm86_struct info; /* declare this _on top_,
                                         * this avoids wasting of stack space.
@@ -239,7 +235,7 @@ int sys_vm86(unsigned long cmd, unsigned long arg)
                                         * return to 32 bit user space.
                                         */
        struct task_struct *tsk;
-       int tmp, ret;
+       int tmp;
        struct vm86plus_struct __user *v86;
 
        tsk = current;
@@ -248,8 +244,7 @@ int sys_vm86(unsigned long cmd, unsigned long arg)
        case VM86_FREE_IRQ:
        case VM86_GET_IRQ_BITS:
        case VM86_GET_AND_RESET_IRQ:
-               ret = do_vm86_irq_handling(cmd, (int)arg);
-               goto out;
+               return do_vm86_irq_handling(cmd, (int)arg);
        case VM86_PLUS_INSTALL_CHECK:
                /*
                 * NOTE: on old vm86 stuff this will return the error
@@ -257,28 +252,23 @@ int sys_vm86(unsigned long cmd, unsigned long arg)
                 *  interpreted as (invalid) address to vm86_struct.
                 *  So the installation check works.
                 */
-               ret = 0;
-               goto out;
+               return 0;
        }
 
        /* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
-       ret = -EPERM;
        if (tsk->thread.saved_sp0)
-               goto out;
+               return -EPERM;
        v86 = (struct vm86plus_struct __user *)arg;
        tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
                                       offsetof(struct kernel_vm86_struct, regs32) -
                                       sizeof(info.regs));
-       ret = -EFAULT;
        if (tmp)
-               goto out;
+               return -EFAULT;
        info.regs32 = current_pt_regs();
        info.vm86plus.is_vm86pus = 1;
        tsk->thread.vm86_info = (struct vm86_struct __user *)v86;
        do_sys_vm86(&info, tsk);
-       ret = 0;        /* we never return here */
-out:
-       return ret;
+       return 0;       /* we never return here */
 }
 
 
index 8e517bba6a7c9434099139ecbb82a5017ca49cbf..8db0010ed150d5c1fa1d0b6aa96668587c2d1004 100644 (file)
@@ -60,6 +60,7 @@
 #define OpGS              25ull  /* GS */
 #define OpMem8            26ull  /* 8-bit zero extended memory operand */
 #define OpImm64           27ull  /* Sign extended 16/32/64-bit immediate */
+#define OpXLat            28ull  /* memory at BX/EBX/RBX + zero-extended AL */
 
 #define OpBits             5  /* Width of operand field */
 #define OpMask             ((1ull << OpBits) - 1)
 #define SrcImmUByte (OpImmUByte << SrcShift)
 #define SrcImmU     (OpImmU << SrcShift)
 #define SrcSI       (OpSI << SrcShift)
+#define SrcXLat     (OpXLat << SrcShift)
 #define SrcImmFAddr (OpImmFAddr << SrcShift)
 #define SrcMemFAddr (OpMemFAddr << SrcShift)
 #define SrcAcc      (OpAcc << SrcShift)
@@ -533,6 +535,9 @@ FOP_SETCC(setle)
 FOP_SETCC(setnle)
 FOP_END;
 
+FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
+FOP_END;
+
 #define __emulate_1op_rax_rdx(ctxt, _op, _suffix, _ex)                 \
        do {                                                            \
                unsigned long _tmp;                                     \
@@ -2996,6 +3001,28 @@ static int em_das(struct x86_emulate_ctxt *ctxt)
        return X86EMUL_CONTINUE;
 }
 
+static int em_aam(struct x86_emulate_ctxt *ctxt)
+{
+       u8 al, ah;
+
+       if (ctxt->src.val == 0)
+               return emulate_de(ctxt);
+
+       al = ctxt->dst.val & 0xff;
+       ah = al / ctxt->src.val;
+       al %= ctxt->src.val;
+
+       ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
+
+       /* Set PF, ZF, SF */
+       ctxt->src.type = OP_IMM;
+       ctxt->src.val = 0;
+       ctxt->src.bytes = 1;
+       fastop(ctxt, em_or);
+
+       return X86EMUL_CONTINUE;
+}
+
 static int em_aad(struct x86_emulate_ctxt *ctxt)
 {
        u8 al = ctxt->dst.val & 0xff;
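
em_aam above emulates AAM: the immediate operand (10 for the plain opcode) acts as a divisor, AH receives AL / imm, AL receives AL % imm, a zero immediate raises #DE, and PF/ZF/SF are then derived from the new AL via the OR-with-zero fastop. A standalone sketch of just the arithmetic (the register value is an arbitrary example):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t eax = 0x1234003f;      /* AL = 0x3f = 63 */
            uint8_t imm = 10;               /* default AAM base */
            uint8_t al = eax & 0xff;
            uint8_t ah;

            if (imm == 0)
                    return 1;               /* hardware raises #DE here */

            ah = al / imm;
            al %= imm;
            eax = (eax & 0xffff0000) | al | (ah << 8);

            /* prints AH=6 AL=3 EAX=0x12340603 */
            printf("AH=%u AL=%u EAX=%#x\n", (unsigned)ah, (unsigned)al, eax);
            return 0;
    }
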
@@ -3936,7 +3963,10 @@ static const struct opcode opcode_table[256] = {
        /* 0xD0 - 0xD7 */
        G(Src2One | ByteOp, group2), G(Src2One, group2),
        G(Src2CL | ByteOp, group2), G(Src2CL, group2),
-       N, I(DstAcc | SrcImmByte | No64, em_aad), N, N,
+       I(DstAcc | SrcImmUByte | No64, em_aam),
+       I(DstAcc | SrcImmUByte | No64, em_aad),
+       F(DstAcc | ByteOp | No64, em_salc),
+       I(DstAcc | SrcXLat | ByteOp, em_mov),
        /* 0xD8 - 0xDF */
        N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
        /* 0xE0 - 0xE7 */
@@ -4198,6 +4228,16 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
                op->val = 0;
                op->count = 1;
                break;
+       case OpXLat:
+               op->type = OP_MEM;
+               op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
+               op->addr.mem.ea =
+                       register_address(ctxt,
+                               reg_read(ctxt, VCPU_REGS_RBX) +
+                               (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
+               op->addr.mem.seg = seg_override(ctxt);
+               op->val = 0;
+               break;
        case OpImmFAddr:
                op->type = OP_IMM;
                op->addr.mem.ea = ctxt->_eip;
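
The OpXLat decode case models the XLAT/XLATB addressing mode: the effective address is (R/E)BX plus the zero-extended low byte of (R/E)AX, taken in the current (possibly overridden) data segment, and the opcode table routes it through em_mov with DstAcc | ByteOp, so the byte at that address ends up in AL. A minimal userspace illustration of the address computation (table contents are arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint8_t table[256];             /* translation table; RBX points at it */
            for (int i = 0; i < 256; i++)
                    table[i] = (uint8_t)(255 - i);

            uint64_t rbx = (uint64_t)(uintptr_t)table;
            uint64_t rax = 0xffffff07;      /* only AL (0x07) contributes */

            /* same computation as the OpXLat case: base + zero-extended AL */
            uint64_t ea = rbx + (rax & 0xff);
            uint8_t al = *(uint8_t *)(uintptr_t)ea;

            printf("AL=%u\n", (unsigned)al); /* 255 - 7 = 248 */
            return 0;
    }
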
index 25a791ed21c88057697eb06339f7066d8cfa63e2..260a9193955538b4fea743045b2f964b2736b24e 100644 (file)
@@ -5434,6 +5434,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
                        return 0;
                }
 
+               if (vcpu->arch.halt_request) {
+                       vcpu->arch.halt_request = 0;
+                       ret = kvm_emulate_halt(vcpu);
+                       goto out;
+               }
+
                if (signal_pending(current))
                        goto out;
                if (need_resched())
index 05a8b1a2300df0997d116e753506bf6e6ffa1fe2..094b5d96ab1468c1875a2a2a5e682245f2909db9 100644 (file)
@@ -555,6 +555,25 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 }
 EXPORT_SYMBOL_GPL(kvm_lmsw);
 
+static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
+{
+       if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
+                       !vcpu->guest_xcr0_loaded) {
+               /* kvm_set_xcr() also depends on this */
+               xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
+               vcpu->guest_xcr0_loaded = 1;
+       }
+}
+
+static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
+{
+       if (vcpu->guest_xcr0_loaded) {
+               if (vcpu->arch.xcr0 != host_xcr0)
+                       xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
+               vcpu->guest_xcr0_loaded = 0;
+       }
+}
+
 int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 {
        u64 xcr0;
@@ -571,8 +590,8 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
                return 1;
        if (xcr0 & ~host_xcr0)
                return 1;
+       kvm_put_guest_xcr0(vcpu);
        vcpu->arch.xcr0 = xcr0;
-       vcpu->guest_xcr0_loaded = 0;
        return 0;
 }
 
@@ -5614,25 +5633,6 @@ static void inject_pending_event(struct kvm_vcpu *vcpu)
        }
 }
 
-static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
-{
-       if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
-                       !vcpu->guest_xcr0_loaded) {
-               /* kvm_set_xcr() also depends on this */
-               xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
-               vcpu->guest_xcr0_loaded = 1;
-       }
-}
-
-static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
-{
-       if (vcpu->guest_xcr0_loaded) {
-               if (vcpu->arch.xcr0 != host_xcr0)
-                       xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
-               vcpu->guest_xcr0_loaded = 0;
-       }
-}
-
 static void process_nmi(struct kvm_vcpu *vcpu)
 {
        unsigned limit = 2;
index 4a9be6ddf05437e66ab8c94862a9b3e8a30e4373..48e8461057ba9346442aa5c72d6772b190dcf4c9 100644 (file)
@@ -295,11 +295,10 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
                        int pos;
                        u32 table_offset, bir;
 
-                       pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
-
+                       pos = dev->msix_cap;
                        pci_read_config_dword(dev, pos + PCI_MSIX_TABLE,
                                              &table_offset);
-                       bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
+                       bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR);
 
                        map_irq.table_base = pci_resource_start(dev, bir);
                        map_irq.entry_nr = msidesc->msi_attrib.entry_nr;
index d0d59bfbccce01e2e019ffd64b7d57e3ddad9622..aabfb8380a1c6cf91e5af8aaed9dd30bd088d9de 100644 (file)
 336    i386    perf_event_open         sys_perf_event_open
 337    i386    recvmmsg                sys_recvmmsg                    compat_sys_recvmmsg
 338    i386    fanotify_init           sys_fanotify_init
-339    i386    fanotify_mark           sys_fanotify_mark               sys32_fanotify_mark
+339    i386    fanotify_mark           sys_fanotify_mark               compat_sys_fanotify_mark
 340    i386    prlimit64               sys_prlimit64
 341    i386    name_to_handle_at       sys_name_to_handle_at
 342    i386    open_by_handle_at       sys_open_by_handle_at           compat_sys_open_by_handle_at
index 53d4f680c9b59f498d41148325cd5a96ef1723e3..a492be2635ac048a527d856b796431ccac72d9fd 100644 (file)
 
 EXPORT_SYMBOL_GPL(hypercall_page);
 
+/*
+ * Pointer to the xen_vcpu_info structure or
+ * &HYPERVISOR_shared_info->vcpu_info[cpu]. See xen_hvm_init_shared_info
+ * and xen_vcpu_setup for details. By default it points to shared_info->vcpu_info
+ * but if the hypervisor supports VCPUOP_register_vcpu_info then it can point
+ * to xen_vcpu_info. The pointer is used in __xen_evtchn_do_upcall to
+ * acknowledge pending events.
+ * Also more subtly it is used by the patched version of irq enable/disable
+ * e.g. xen_irq_enable_direct and xen_iret in PV mode.
+ *
+ * The desire to be able to do those mask/unmask operations as a single
+ * instruction by using the per-cpu offset held in %gs is the real reason
+ * vcpu info is in a per-cpu pointer and the original reason for this
+ * hypercall.
+ *
+ */
 DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
+
+/*
+ * Per-CPU pages used if the hypervisor supports the VCPUOP_register_vcpu_info
+ * hypercall. This can be used in both PV and PVHVM mode. The structure
+ * overrides the default per_cpu(xen_vcpu, cpu) value.
+ */
 DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
 
 enum xen_domain_type xen_domain_type = XEN_NATIVE;
@@ -157,6 +179,21 @@ static void xen_vcpu_setup(int cpu)
 
        BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
 
+       /*
+        * This path is called twice on PVHVM - first during bootup via
+        * smp_init -> xen_hvm_cpu_notify, and then if the VCPU is being
+        * hotplugged: cpu_up -> xen_hvm_cpu_notify.
+        * As we can only issue VCPUOP_register_vcpu_info once, let's
+        * not overwrite its result.
+        *
+        * For PV it is called during restore (xen_vcpu_restore) and bootup
+        * (xen_setup_vcpu_info_placement). The hotplug mechanism does not
+        * use this function.
+        */
+       if (xen_hvm_domain()) {
+               if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))
+                       return;
+       }
        if (cpu < MAX_VIRT_CPUS)
                per_cpu(xen_vcpu,cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
 
@@ -172,7 +209,12 @@ static void xen_vcpu_setup(int cpu)
 
        /* Check to see if the hypervisor will put the vcpu_info
           structure where we want it, which allows direct access via
-          a percpu-variable. */
+          a percpu-variable.
+          N.B. This hypercall can _only_ be called once per CPU. Subsequent
+          calls will error out with -EINVAL. This is because the hypervisor
+          has no unregister variant and this hypercall does not allow
+          overwriting info.mfn and info.offset.
+        */
        err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
 
        if (err) {
@@ -387,6 +429,9 @@ static void __init xen_init_cpuid_mask(void)
                cpuid_leaf1_edx_mask &=
                        ~((1 << X86_FEATURE_APIC) |  /* disable local APIC */
                          (1 << X86_FEATURE_ACPI));  /* disable ACPI */
+
+       cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_X2APIC % 32));
+
        ax = 1;
        cx = 0;
        xen_cpuid(&ax, &bx, &cx, &dx);
@@ -1603,6 +1648,9 @@ void __ref xen_hvm_init_shared_info(void)
         * online but xen_hvm_init_shared_info is run at resume time too and
         * in that case multiple vcpus might be online. */
        for_each_online_cpu(cpu) {
+               /* Leave it to be NULL. */
+               if (cpu >= MAX_VIRT_CPUS)
+                       continue;
                per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
        }
 }
index 8b54603ce81613fee6783a6d3869b0021878feb4..3002ec1bb71a27d193ce4b1e041f8b3a3c5b49fa 100644 (file)
@@ -364,7 +364,7 @@ void __cpuinit xen_init_lock_cpu(int cpu)
        int irq;
        const char *name;
 
-       WARN(per_cpu(lock_kicker_irq, cpu) > 0, "spinlock on CPU%d exists on IRQ%d!\n",
+       WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
             cpu, per_cpu(lock_kicker_irq, cpu));
 
        /*
index 8b4221cfd118bdb390fea9fa5e6bbfdfe37060f7..380a2003231e4c56984207dd7f2658bd9bcc3166 100644 (file)
@@ -26,6 +26,7 @@ config BCMA_HOST_PCI_POSSIBLE
 config BCMA_HOST_PCI
        bool "Support for BCMA on PCI-host bus"
        depends on BCMA_HOST_PCI_POSSIBLE
+       default y
 
 config BCMA_DRIVER_PCI_HOSTMODE
        bool "Driver for PCI core working in hostmode"
index 17b26ce7e051b1374b6be01f18f3dd1cad057e56..37a5ffe673d5da8c9ac6ee955d835d48b596ca7f 100644 (file)
@@ -9,6 +9,25 @@
 #include <linux/export.h>
 #include <linux/bcma/bcma.h>
 
+static bool bcma_core_wait_value(struct bcma_device *core, u16 reg, u32 mask,
+                                u32 value, int timeout)
+{
+       unsigned long deadline = jiffies + timeout;
+       u32 val;
+
+       do {
+               val = bcma_aread32(core, reg);
+               if ((val & mask) == value)
+                       return true;
+               cpu_relax();
+               udelay(10);
+       } while (!time_after_eq(jiffies, deadline));
+
+       bcma_warn(core->bus, "Timeout waiting for register 0x%04X!\n", reg);
+
+       return false;
+}
+
 bool bcma_core_is_enabled(struct bcma_device *core)
 {
        if ((bcma_aread32(core, BCMA_IOCTL) & (BCMA_IOCTL_CLK | BCMA_IOCTL_FGC))
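
bcma_core_wait_value polls an agent register until the masked value matches or a jiffies-based deadline expires; bcma_core_disable below now uses it (mask ~0, value 0, timeout 300) to wait for BCMA_RESET_ST to clear before asserting reset. Purely to illustrate the same poll-until-value-or-deadline shape, here is a userspace analog with a fake register read (the names and the millisecond timeout are invented for the sketch):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    static uint32_t fake_read_reg(void)
    {
            static int calls;
            return ++calls < 5 ? 0x1 : 0x0;  /* "busy" bit drops after a few reads */
    }

    static bool wait_value(uint32_t mask, uint32_t value, long timeout_ms)
    {
            struct timespec start, now;

            clock_gettime(CLOCK_MONOTONIC, &start);
            do {
                    if ((fake_read_reg() & mask) == value)
                            return true;
                    clock_gettime(CLOCK_MONOTONIC, &now);
            } while ((now.tv_sec - start.tv_sec) * 1000 +
                     (now.tv_nsec - start.tv_nsec) / 1000000 < timeout_ms);

            fprintf(stderr, "timeout waiting for register\n");
            return false;
    }

    int main(void)
    {
            return wait_value(0x1, 0x0, 300) ? 0 : 1;
    }
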
@@ -25,13 +44,15 @@ void bcma_core_disable(struct bcma_device *core, u32 flags)
        if (bcma_aread32(core, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET)
                return;
 
-       bcma_awrite32(core, BCMA_IOCTL, flags);
-       bcma_aread32(core, BCMA_IOCTL);
-       udelay(10);
+       bcma_core_wait_value(core, BCMA_RESET_ST, ~0, 0, 300);
 
        bcma_awrite32(core, BCMA_RESET_CTL, BCMA_RESET_CTL_RESET);
        bcma_aread32(core, BCMA_RESET_CTL);
        udelay(1);
+
+       bcma_awrite32(core, BCMA_IOCTL, flags);
+       bcma_aread32(core, BCMA_IOCTL);
+       udelay(10);
 }
 EXPORT_SYMBOL_GPL(bcma_core_disable);
 
@@ -43,6 +64,7 @@ int bcma_core_enable(struct bcma_device *core, u32 flags)
        bcma_aread32(core, BCMA_IOCTL);
 
        bcma_awrite32(core, BCMA_RESET_CTL, 0);
+       bcma_aread32(core, BCMA_RESET_CTL);
        udelay(1);
 
        bcma_awrite32(core, BCMA_IOCTL, (BCMA_IOCTL_CLK | flags));
index e6ed4fe5dcedc50a758db62a5ca8a1d0c30d3b38..4d07cce9c5d97ad2d7d9733c90209bfcb5a094da 100644 (file)
@@ -30,7 +30,7 @@ struct bcma_sflash_tbl_e {
        u16 numblocks;
 };
 
-static struct bcma_sflash_tbl_e bcma_sflash_st_tbl[] = {
+static const struct bcma_sflash_tbl_e bcma_sflash_st_tbl[] = {
        { "M25P20", 0x11, 0x10000, 4, },
        { "M25P40", 0x12, 0x10000, 8, },
 
@@ -41,7 +41,7 @@ static struct bcma_sflash_tbl_e bcma_sflash_st_tbl[] = {
        { 0 },
 };
 
-static struct bcma_sflash_tbl_e bcma_sflash_sst_tbl[] = {
+static const struct bcma_sflash_tbl_e bcma_sflash_sst_tbl[] = {
        { "SST25WF512", 1, 0x1000, 16, },
        { "SST25VF512", 0x48, 0x1000, 16, },
        { "SST25WF010", 2, 0x1000, 32, },
@@ -59,7 +59,7 @@ static struct bcma_sflash_tbl_e bcma_sflash_sst_tbl[] = {
        { 0 },
 };
 
-static struct bcma_sflash_tbl_e bcma_sflash_at_tbl[] = {
+static const struct bcma_sflash_tbl_e bcma_sflash_at_tbl[] = {
        { "AT45DB011", 0xc, 256, 512, },
        { "AT45DB021", 0x14, 256, 1024, },
        { "AT45DB041", 0x1c, 256, 2048, },
@@ -89,7 +89,7 @@ int bcma_sflash_init(struct bcma_drv_cc *cc)
 {
        struct bcma_bus *bus = cc->core->bus;
        struct bcma_sflash *sflash = &cc->sflash;
-       struct bcma_sflash_tbl_e *e;
+       const struct bcma_sflash_tbl_e *e;
        u32 id, id2;
 
        switch (cc->capabilities & BCMA_CC_CAP_FLASHT) {
index 8934298a638dfdbfb6d2e5e4fa6e43cb65aeac8b..de15b4f4b2377b74396c69e217282d7151c89d28 100644 (file)
@@ -72,12 +72,12 @@ fail:
  * R/W ops.
  **************************************************/
 
-static void bcma_sprom_read(struct bcma_bus *bus, u16 offset, u16 *sprom)
+static void bcma_sprom_read(struct bcma_bus *bus, u16 offset, u16 *sprom,
+                           size_t words)
 {
        int i;
-       for (i = 0; i < SSB_SPROMSIZE_WORDS_R4; i++)
-               sprom[i] = bcma_read16(bus->drv_cc.core,
-                                      offset + (i * 2));
+       for (i = 0; i < words; i++)
+               sprom[i] = bcma_read16(bus->drv_cc.core, offset + (i * 2));
 }
 
 /**************************************************
@@ -124,29 +124,29 @@ static inline u8 bcma_crc8(u8 crc, u8 data)
        return t[crc ^ data];
 }
 
-static u8 bcma_sprom_crc(const u16 *sprom)
+static u8 bcma_sprom_crc(const u16 *sprom, size_t words)
 {
        int word;
        u8 crc = 0xFF;
 
-       for (word = 0; word < SSB_SPROMSIZE_WORDS_R4 - 1; word++) {
+       for (word = 0; word < words - 1; word++) {
                crc = bcma_crc8(crc, sprom[word] & 0x00FF);
                crc = bcma_crc8(crc, (sprom[word] & 0xFF00) >> 8);
        }
-       crc = bcma_crc8(crc, sprom[SSB_SPROMSIZE_WORDS_R4 - 1] & 0x00FF);
+       crc = bcma_crc8(crc, sprom[words - 1] & 0x00FF);
        crc ^= 0xFF;
 
        return crc;
 }
 
-static int bcma_sprom_check_crc(const u16 *sprom)
+static int bcma_sprom_check_crc(const u16 *sprom, size_t words)
 {
        u8 crc;
        u8 expected_crc;
        u16 tmp;
 
-       crc = bcma_sprom_crc(sprom);
-       tmp = sprom[SSB_SPROMSIZE_WORDS_R4 - 1] & SSB_SPROM_REVISION_CRC;
+       crc = bcma_sprom_crc(sprom, words);
+       tmp = sprom[words - 1] & SSB_SPROM_REVISION_CRC;
        expected_crc = tmp >> SSB_SPROM_REVISION_CRC_SHIFT;
        if (crc != expected_crc)
                return -EPROTO;
@@ -154,21 +154,25 @@ static int bcma_sprom_check_crc(const u16 *sprom)
        return 0;
 }
 
-static int bcma_sprom_valid(const u16 *sprom)
+static int bcma_sprom_valid(struct bcma_bus *bus, const u16 *sprom,
+                           size_t words)
 {
        u16 revision;
        int err;
 
-       err = bcma_sprom_check_crc(sprom);
+       err = bcma_sprom_check_crc(sprom, words);
        if (err)
                return err;
 
-       revision = sprom[SSB_SPROMSIZE_WORDS_R4 - 1] & SSB_SPROM_REVISION_REV;
-       if (revision != 8 && revision != 9) {
+       revision = sprom[words - 1] & SSB_SPROM_REVISION_REV;
+       if (revision != 8 && revision != 9 && revision != 10) {
                pr_err("Unsupported SPROM revision: %d\n", revision);
                return -ENOENT;
        }
 
+       bus->sprom.revision = revision;
+       bcma_debug(bus, "Found SPROM revision %d\n", revision);
+
        return 0;
 }
 
@@ -208,9 +212,6 @@ static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom)
        BUILD_BUG_ON(ARRAY_SIZE(pwr_info_offset) !=
                        ARRAY_SIZE(bus->sprom.core_pwr_info));
 
-       bus->sprom.revision = sprom[SSB_SPROMSIZE_WORDS_R4 - 1] &
-               SSB_SPROM_REVISION_REV;
-
        for (i = 0; i < 3; i++) {
                v = sprom[SPOFF(SSB_SPROM8_IL0MAC) + i];
                *(((__be16 *)bus->sprom.il0mac) + i) = cpu_to_be16(v);
@@ -502,7 +503,6 @@ static bool bcma_sprom_onchip_available(struct bcma_bus *bus)
        case BCMA_CHIP_ID_BCM4331:
                present = chip_status & BCMA_CC_CHIPST_4331_OTP_PRESENT;
                break;
-
        case BCMA_CHIP_ID_BCM43224:
        case BCMA_CHIP_ID_BCM43225:
                /* for these chips OTP is always available */
@@ -550,7 +550,9 @@ int bcma_sprom_get(struct bcma_bus *bus)
 {
        u16 offset = BCMA_CC_SPROM;
        u16 *sprom;
-       int err = 0;
+       size_t sprom_sizes[] = { SSB_SPROMSIZE_WORDS_R4,
+                                SSB_SPROMSIZE_WORDS_R10, };
+       int i, err = 0;
 
        if (!bus->drv_cc.core)
                return -EOPNOTSUPP;
@@ -579,32 +581,37 @@ int bcma_sprom_get(struct bcma_bus *bus)
                }
        }
 
-       sprom = kcalloc(SSB_SPROMSIZE_WORDS_R4, sizeof(u16),
-                       GFP_KERNEL);
-       if (!sprom)
-               return -ENOMEM;
-
        if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4331 ||
            bus->chipinfo.id == BCMA_CHIP_ID_BCM43431)
                bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, false);
 
        bcma_debug(bus, "SPROM offset 0x%x\n", offset);
-       bcma_sprom_read(bus, offset, sprom);
+       for (i = 0; i < ARRAY_SIZE(sprom_sizes); i++) {
+               size_t words = sprom_sizes[i];
+
+               sprom = kcalloc(words, sizeof(u16), GFP_KERNEL);
+               if (!sprom)
+                       return -ENOMEM;
+
+               bcma_sprom_read(bus, offset, sprom, words);
+               err = bcma_sprom_valid(bus, sprom, words);
+               if (!err)
+                       break;
+
+               kfree(sprom);
+       }
 
        if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4331 ||
            bus->chipinfo.id == BCMA_CHIP_ID_BCM43431)
                bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, true);
 
-       err = bcma_sprom_valid(sprom);
        if (err) {
-               bcma_warn(bus, "invalid sprom read from the PCIe card, try to use fallback sprom\n");
+               bcma_warn(bus, "Invalid SPROM read from the PCIe card, trying to use fallback SPROM\n");
                err = bcma_fill_sprom_with_fallback(bus, &bus->sprom);
-               goto out;
+       } else {
+               bcma_sprom_extract_r8(bus, sprom);
+               kfree(sprom);
        }
 
-       bcma_sprom_extract_r8(bus, sprom);
-
-out:
-       kfree(sprom);
        return err;
 }
index 13693b7a0d5cf3362cc888919692c1396953b67e..75c262694632255e0932fa387c7e6b9d6e08aa25 100644 (file)
@@ -554,6 +554,7 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
        skb = bt_skb_alloc(num_blocks * blksz + BTSDIO_DMA_ALIGN, GFP_ATOMIC);
        if (skb == NULL) {
                BT_ERR("No free skb");
+               ret = -ENOMEM;
                goto exit;
        }
 
index 7a7e5f8ecadcffef990eaea6c2fafdcf94ff4c4b..de4cf4daa2f4cefa3a270ccaf8e4e8fb22af1015 100644 (file)
@@ -57,6 +57,9 @@ static struct usb_device_id btusb_table[] = {
        /* Apple-specific (Broadcom) devices */
        { USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01) },
 
+       /* MediaTek MT76x0E */
+       { USB_DEVICE(0x0e8d, 0x763f) },
+
        /* Broadcom SoftSailing reporting vendor specific */
        { USB_DEVICE(0x0a5c, 0x21e1) },
 
@@ -1616,6 +1619,7 @@ static struct usb_driver btusb_driver = {
 #ifdef CONFIG_PM
        .suspend        = btusb_suspend,
        .resume         = btusb_resume,
+       .reset_resume   = btusb_resume,
 #endif
        .id_table       = btusb_table,
        .supports_autosuspend = 1,
index 6961bbeab3edb0c9a75767865c87bc2a6a6b4386..264f550999406f7b924be6634917e3826e80a161 100644 (file)
@@ -1685,6 +1685,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
        { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) },
@@ -2341,7 +2342,7 @@ struct hid_device *hid_allocate_device(void)
 
        init_waitqueue_head(&hdev->debug_wait);
        INIT_LIST_HEAD(&hdev->debug_list);
-       mutex_init(&hdev->debug_list_lock);
+       spin_lock_init(&hdev->debug_list_lock);
        sema_init(&hdev->driver_lock, 1);
        sema_init(&hdev->driver_input_lock, 1);
 
index 7e56cb3855e329696abea711445bffe588d101f6..8453214ec3767d6c55aefe2bb99466d426904403 100644 (file)
@@ -579,15 +579,16 @@ void hid_debug_event(struct hid_device *hdev, char *buf)
 {
        int i;
        struct hid_debug_list *list;
+       unsigned long flags;
 
-       mutex_lock(&hdev->debug_list_lock);
+       spin_lock_irqsave(&hdev->debug_list_lock, flags);
        list_for_each_entry(list, &hdev->debug_list, node) {
                for (i = 0; i < strlen(buf); i++)
                        list->hid_debug_buf[(list->tail + i) % HID_DEBUG_BUFSIZE] =
                                buf[i];
                list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE;
         }
-       mutex_unlock(&hdev->debug_list_lock);
+       spin_unlock_irqrestore(&hdev->debug_list_lock, flags);
 
        wake_up_interruptible(&hdev->debug_wait);
 }
@@ -977,6 +978,7 @@ static int hid_debug_events_open(struct inode *inode, struct file *file)
 {
        int err = 0;
        struct hid_debug_list *list;
+       unsigned long flags;
 
        if (!(list = kzalloc(sizeof(struct hid_debug_list), GFP_KERNEL))) {
                err = -ENOMEM;
@@ -992,9 +994,9 @@ static int hid_debug_events_open(struct inode *inode, struct file *file)
        file->private_data = list;
        mutex_init(&list->read_mutex);
 
-       mutex_lock(&list->hdev->debug_list_lock);
+       spin_lock_irqsave(&list->hdev->debug_list_lock, flags);
        list_add_tail(&list->node, &list->hdev->debug_list);
-       mutex_unlock(&list->hdev->debug_list_lock);
+       spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
 
 out:
        return err;
@@ -1088,10 +1090,11 @@ static unsigned int hid_debug_events_poll(struct file *file, poll_table *wait)
 static int hid_debug_events_release(struct inode *inode, struct file *file)
 {
        struct hid_debug_list *list = file->private_data;
+       unsigned long flags;
 
-       mutex_lock(&list->hdev->debug_list_lock);
+       spin_lock_irqsave(&list->hdev->debug_list_lock, flags);
        list_del(&list->node);
-       mutex_unlock(&list->hdev->debug_list_lock);
+       spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
        kfree(list->hid_debug_buf);
        kfree(list);
 
index 9b0efb0083feaf4a1f258226aad0426c7b961721..d1649119211277275840a1bee984ed93de28c025 100644 (file)
@@ -18,7 +18,8 @@
 
 #include "hid-ids.h"
 
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \
+    (IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES))
 #define SRWS1_NUMBER_LEDS 15
 struct steelseries_srws1_data {
        __u16 led_state;
@@ -107,7 +108,8 @@ static __u8 steelseries_srws1_rdesc_fixed[] = {
 0xC0                /*  End Collection                      */
 };
 
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \
+    (IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES))
 static void steelseries_srws1_set_leds(struct hid_device *hdev, __u16 leds)
 {
        struct list_head *report_list = &hdev->report_enum[HID_OUTPUT_REPORT].report_list;
@@ -370,7 +372,8 @@ MODULE_DEVICE_TABLE(hid, steelseries_srws1_devices);
 static struct hid_driver steelseries_srws1_driver = {
        .name = "steelseries_srws1",
        .id_table = steelseries_srws1_devices,
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \
+    (IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES))
        .probe = steelseries_srws1_probe,
        .remove = steelseries_srws1_remove,
 #endif
index 0e8fab1913dfd8737ede3b7d9f332aee0861169e..fa6964d8681a0d126fcf7c4845896b3f06298f8f 100644 (file)
@@ -272,6 +272,27 @@ static struct cpuidle_state hsw_cstates[CPUIDLE_STATE_MAX] = {
                .exit_latency = 166,
                .target_residency = 500,
                .enter = &intel_idle },
+       {
+               .name = "C8-HSW",
+               .desc = "MWAIT 0x40",
+               .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 300,
+               .target_residency = 900,
+               .enter = &intel_idle },
+       {
+               .name = "C9-HSW",
+               .desc = "MWAIT 0x50",
+               .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 600,
+               .target_residency = 1800,
+               .enter = &intel_idle },
+       {
+               .name = "C10-HSW",
+               .desc = "MWAIT 0x60",
+               .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 2600,
+               .target_residency = 7700,
+               .enter = &intel_idle },
        {
                .enter = NULL }
 };
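
Each new entry pairs an MWAIT hint with an exit latency and a target residency in microseconds (C8-HSW: 300/900, C9-HSW: 600/1800, C10-HSW: 2600/7700). The cpuidle governor then, roughly, picks the deepest state whose target residency does not exceed the predicted idle time and whose exit latency fits the current latency constraint; the sketch below applies that simplified rule to the three new states (the real menu governor is considerably more involved):

    #include <stdio.h>

    struct state {
            const char *name;
            unsigned exit_latency;          /* us */
            unsigned target_residency;      /* us */
    };

    static const struct state states[] = {
            { "C8-HSW",   300,  900 },
            { "C9-HSW",   600, 1800 },
            { "C10-HSW", 2600, 7700 },
    };

    static const char *pick(unsigned predicted_us, unsigned latency_limit_us)
    {
            const char *best = "a shallower state";

            for (unsigned i = 0; i < sizeof(states) / sizeof(states[0]); i++)
                    if (states[i].target_residency <= predicted_us &&
                        states[i].exit_latency <= latency_limit_us)
                            best = states[i].name;
            return best;
    }

    int main(void)
    {
            printf("%s\n", pick(10000, 5000));  /* long idle, loose limit -> C10-HSW */
            printf("%s\n", pick(1000, 5000));   /* shorter idle -> C8-HSW */
            return 0;
    }
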
index c6083132c4b8ccaf21c7addb61cf8596165bf915..0387e05cdb98b9708bde55143cd8a5cba853a2fa 100644 (file)
@@ -319,6 +319,9 @@ static void __cache_size_refresh(void)
 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
                               enum data_mode *data_mode)
 {
+       unsigned noio_flag;
+       void *ptr;
+
        if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
                *data_mode = DATA_MODE_SLAB;
                return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
@@ -332,7 +335,26 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
        }
 
        *data_mode = DATA_MODE_VMALLOC;
-       return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
+
+       /*
+        * __vmalloc allocates the data pages and auxiliary structures with
+        * gfp_flags that were specified, but pagetables are always allocated
+        * with GFP_KERNEL, no matter what was specified as gfp_mask.
+        *
+        * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
+        * all allocations done by this process (including pagetables) are done
+        * as if GFP_NOIO was specified.
+        */
+
+       if (gfp_mask & __GFP_NORETRY)
+               noio_flag = memalloc_noio_save();
+
+       ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
+
+       if (gfp_mask & __GFP_NORETRY)
+               memalloc_noio_restore(noio_flag);
+
+       return ptr;
 }
 
 /*
index 83e995fece88c1330335c85a23ef42b2ed54a1f4..1af7255bbffb547aa67db8b1f38bf99b2ff87094 100644 (file)
@@ -1044,7 +1044,7 @@ void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
                                 struct dm_cache_statistics *stats)
 {
        down_read(&cmd->root_lock);
-       memcpy(stats, &cmd->stats, sizeof(*stats));
+       *stats = cmd->stats;
        up_read(&cmd->root_lock);
 }
 
@@ -1052,7 +1052,7 @@ void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
                                 struct dm_cache_statistics *stats)
 {
        down_write(&cmd->root_lock);
-       memcpy(&cmd->stats, stats, sizeof(*stats));
+       cmd->stats = *stats;
        up_write(&cmd->root_lock);
 }
 
index 558bdfdabf5f2da39e5bef487dc5008a2399b659..33369ca9614f978ae0481e0d79677c980660f42d 100644 (file)
@@ -130,8 +130,8 @@ struct dm_cache_policy {
         *
         * Must not block.
         *
-        * Returns 1 iff in cache, 0 iff not, < 0 on error (-EWOULDBLOCK
-        * would be typical).
+        * Returns 0 if in cache, -ENOENT if not, < 0 for other errors
+        * (-EWOULDBLOCK would be typical).
         */
        int (*lookup)(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock);
 
index 10744091e6cabb5f6bb55e59709eb4ba0350a98b..df44b60e66f289880c177e98d78f1aa04ce08e7a 100644 (file)
@@ -205,7 +205,7 @@ struct per_bio_data {
        /*
         * writethrough fields.  These MUST remain at the end of this
         * structure and the 'cache' member must be the first as it
-        * is used to determine the offsetof the writethrough fields.
+        * is used to determine the offset of the writethrough fields.
         */
        struct cache *cache;
        dm_cblock_t cblock;
@@ -393,7 +393,7 @@ static int get_cell(struct cache *cache,
        return r;
 }
 
- /*----------------------------------------------------------------*/
+/*----------------------------------------------------------------*/
 
 static bool is_dirty(struct cache *cache, dm_cblock_t b)
 {
@@ -419,6 +419,7 @@ static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cbl
 }
 
 /*----------------------------------------------------------------*/
+
 static bool block_size_is_power_of_two(struct cache *cache)
 {
        return cache->sectors_per_block_shift >= 0;
@@ -667,7 +668,7 @@ static void writethrough_endio(struct bio *bio, int err)
 
        /*
         * We can't issue this bio directly, since we're in interrupt
-        * context.  So it get's put on a bio list for processing by the
+        * context.  So it gets put on a bio list for processing by the
         * worker thread.
         */
        defer_writethrough_bio(pb->cache, bio);
@@ -1445,6 +1446,7 @@ static void do_worker(struct work_struct *ws)
 static void do_waker(struct work_struct *ws)
 {
        struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
+       policy_tick(cache->policy);
        wake_worker(cache);
        queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
 }
@@ -1809,7 +1811,37 @@ static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
 
 static struct kmem_cache *migration_cache;
 
-static int set_config_values(struct dm_cache_policy *p, int argc, const char **argv)
+#define NOT_CORE_OPTION 1
+
+static int process_config_option(struct cache *cache, const char *key, const char *value)
+{
+       unsigned long tmp;
+
+       if (!strcasecmp(key, "migration_threshold")) {
+               if (kstrtoul(value, 10, &tmp))
+                       return -EINVAL;
+
+               cache->migration_threshold = tmp;
+               return 0;
+       }
+
+       return NOT_CORE_OPTION;
+}
+
+static int set_config_value(struct cache *cache, const char *key, const char *value)
+{
+       int r = process_config_option(cache, key, value);
+
+       if (r == NOT_CORE_OPTION)
+               r = policy_set_config_value(cache->policy, key, value);
+
+       if (r)
+               DMWARN("bad config value for %s: %s", key, value);
+
+       return r;
+}
+
+static int set_config_values(struct cache *cache, int argc, const char **argv)
 {
        int r = 0;
 
@@ -1819,12 +1851,9 @@ static int set_config_values(struct dm_cache_policy *p, int argc, const char **a
        }
 
        while (argc) {
-               r = policy_set_config_value(p, argv[0], argv[1]);
-               if (r) {
-                       DMWARN("policy_set_config_value failed: key = '%s', value = '%s'",
-                              argv[0], argv[1]);
-                       return r;
-               }
+               r = set_config_value(cache, argv[0], argv[1]);
+               if (r)
+                       break;
 
                argc -= 2;
                argv += 2;
@@ -1836,8 +1865,6 @@ static int set_config_values(struct dm_cache_policy *p, int argc, const char **a
 static int create_cache_policy(struct cache *cache, struct cache_args *ca,
                               char **error)
 {
-       int r;
-
        cache->policy = dm_cache_policy_create(ca->policy_name,
                                               cache->cache_size,
                                               cache->origin_sectors,
@@ -1847,14 +1874,7 @@ static int create_cache_policy(struct cache *cache, struct cache_args *ca,
                return -ENOMEM;
        }
 
-       r = set_config_values(cache->policy, ca->policy_argc, ca->policy_argv);
-       if (r) {
-               *error = "Error setting cache policy's config values";
-               dm_cache_policy_destroy(cache->policy);
-               cache->policy = NULL;
-       }
-
-       return r;
+       return 0;
 }
 
 /*
@@ -1886,7 +1906,7 @@ static sector_t calculate_discard_block_size(sector_t cache_block_size,
        return discard_block_size;
 }
 
-#define DEFAULT_MIGRATION_THRESHOLD (2048 * 100)
+#define DEFAULT_MIGRATION_THRESHOLD 2048
 
 static int cache_create(struct cache_args *ca, struct cache **result)
 {
@@ -1911,7 +1931,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
        ti->discards_supported = true;
        ti->discard_zeroes_data_unsupported = true;
 
-       memcpy(&cache->features, &ca->features, sizeof(cache->features));
+       cache->features = ca->features;
        ti->per_bio_data_size = get_per_bio_data_size(cache);
 
        cache->callbacks.congested_fn = cache_is_congested;
@@ -1948,7 +1968,15 @@ static int cache_create(struct cache_args *ca, struct cache **result)
        r = create_cache_policy(cache, ca, error);
        if (r)
                goto bad;
+
        cache->policy_nr_args = ca->policy_argc;
+       cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
+
+       r = set_config_values(cache, ca->policy_argc, ca->policy_argv);
+       if (r) {
+               *error = "Error setting cache policy's config values";
+               goto bad;
+       }
 
        cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
                                     ca->block_size, may_format,
@@ -1967,10 +1995,10 @@ static int cache_create(struct cache_args *ca, struct cache **result)
        INIT_LIST_HEAD(&cache->quiesced_migrations);
        INIT_LIST_HEAD(&cache->completed_migrations);
        INIT_LIST_HEAD(&cache->need_commit_migrations);
-       cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
        atomic_set(&cache->nr_migrations, 0);
        init_waitqueue_head(&cache->migration_wait);
 
+       r = -ENOMEM;
        cache->nr_dirty = 0;
        cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
        if (!cache->dirty_bitset) {
@@ -2517,23 +2545,6 @@ err:
        DMEMIT("Error");
 }
 
-#define NOT_CORE_OPTION 1
-
-static int process_config_option(struct cache *cache, char **argv)
-{
-       unsigned long tmp;
-
-       if (!strcasecmp(argv[0], "migration_threshold")) {
-               if (kstrtoul(argv[1], 10, &tmp))
-                       return -EINVAL;
-
-               cache->migration_threshold = tmp;
-               return 0;
-       }
-
-       return NOT_CORE_OPTION;
-}
-
 /*
  * Supports <key> <value>.
  *
@@ -2541,17 +2552,12 @@ static int process_config_option(struct cache *cache, char **argv)
  */
 static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
 {
-       int r;
        struct cache *cache = ti->private;
 
        if (argc != 2)
                return -EINVAL;
 
-       r = process_config_option(cache, argv);
-       if (r == NOT_CORE_OPTION)
-               return policy_set_config_value(cache->policy, argv[0], argv[1]);
-
-       return r;
+       return set_config_value(cache, argv[0], argv[1]);
 }
 
 static int cache_iterate_devices(struct dm_target *ti,
@@ -2609,7 +2615,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static struct target_type cache_target = {
        .name = "cache",
-       .version = {1, 1, 0},
+       .version = {1, 1, 1},
        .module = THIS_MODULE,
        .ctr = cache_ctr,
        .dtr = cache_dtr,
index 51bb81676be37d2ff4520557c70f83bda488845a..bdf26f5bd326011595f8b9801cc2e408af6842cc 100644 (file)
@@ -907,6 +907,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
 
        ti->num_flush_bios = 1;
        ti->num_discard_bios = 1;
+       ti->num_write_same_bios = 1;
 
        return 0;
 
index c0e07026a8d136f084958800192c12236ffd37e2..c434e5aab2dfc9e6a63ca7700e5ac1c1deacd025 100644 (file)
@@ -1121,6 +1121,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
        if (!s->pending_pool) {
                ti->error = "Could not allocate mempool for pending exceptions";
+               r = -ENOMEM;
                goto bad_pending_pool;
        }
 
index ea5e878a30b93b1f974d449738fc46b55d5eaeba..d907ca6227cec3519e46ac95e146656b342125c2 100644 (file)
@@ -94,7 +94,7 @@ static int get_stripe(struct dm_target *ti, struct stripe_c *sc,
 static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 {
        struct stripe_c *sc;
-       sector_t width;
+       sector_t width, tmp_len;
        uint32_t stripes;
        uint32_t chunk_size;
        int r;
@@ -116,15 +116,16 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        }
 
        width = ti->len;
-       if (sector_div(width, chunk_size)) {
+       if (sector_div(width, stripes)) {
                ti->error = "Target length not divisible by "
-                   "chunk size";
+                   "number of stripes";
                return -EINVAL;
        }
 
-       if (sector_div(width, stripes)) {
+       tmp_len = width;
+       if (sector_div(tmp_len, chunk_size)) {
                ti->error = "Target length not divisible by "
-                   "number of stripes";
+                   "chunk size";
                return -EINVAL;
        }
 
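
The reordered checks now require that the target length divide evenly by the stripe count first, and that the resulting per-stripe width then divide evenly by the chunk size (sector_div divides in place and returns the remainder; the sketch below uses plain / and % instead). A worked example with made-up numbers:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long len = 4096;  /* target length in sectors (example) */
            unsigned stripes = 4;
            unsigned chunk_size = 128;      /* sectors per chunk */

            if (len % stripes) {
                    puts("Target length not divisible by number of stripes");
                    return 1;
            }
            unsigned long long width = len / stripes;   /* 1024 sectors per stripe */

            if (width % chunk_size) {
                    puts("Target length not divisible by chunk size");
                    return 1;
            }
            printf("per-stripe width: %llu sectors (%llu chunks)\n",
                   width, width / chunk_size);          /* 1024 sectors, 8 chunks */
            return 0;
    }
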
index e50dad0c65f4759d535b5cf81b0258a3b27066b5..1ff252ab7d46a1ed55fb98a93426acfb764998dd 100644 (file)
@@ -1442,7 +1442,7 @@ static bool dm_table_supports_write_same(struct dm_table *t)
                        return false;
 
                if (!ti->type->iterate_devices ||
-                   !ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
+                   ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
                        return false;
        }
 
index 00cee02f8fc9b299788941c9b4147abdfc56c83c..60bce435f4fa1443c2994bd483e70ea096c7aa92 100644 (file)
@@ -1645,12 +1645,12 @@ int dm_thin_get_highest_mapped_block(struct dm_thin_device *td,
        return r;
 }
 
-static int __resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
+static int __resize_space_map(struct dm_space_map *sm, dm_block_t new_count)
 {
        int r;
        dm_block_t old_count;
 
-       r = dm_sm_get_nr_blocks(pmd->data_sm, &old_count);
+       r = dm_sm_get_nr_blocks(sm, &old_count);
        if (r)
                return r;
 
@@ -1658,11 +1658,11 @@ static int __resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
                return 0;
 
        if (new_count < old_count) {
-               DMERR("cannot reduce size of data device");
+               DMERR("cannot reduce size of space map");
                return -EINVAL;
        }
 
-       return dm_sm_extend(pmd->data_sm, new_count - old_count);
+       return dm_sm_extend(sm, new_count - old_count);
 }
 
 int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
@@ -1671,7 +1671,19 @@ int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
 
        down_write(&pmd->root_lock);
        if (!pmd->fail_io)
-               r = __resize_data_dev(pmd, new_count);
+               r = __resize_space_map(pmd->data_sm, new_count);
+       up_write(&pmd->root_lock);
+
+       return r;
+}
+
+int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
+{
+       int r = -EINVAL;
+
+       down_write(&pmd->root_lock);
+       if (!pmd->fail_io)
+               r = __resize_space_map(pmd->metadata_sm, new_count);
        up_write(&pmd->root_lock);
 
        return r;
@@ -1684,3 +1696,17 @@ void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd)
        dm_bm_set_read_only(pmd->bm);
        up_write(&pmd->root_lock);
 }
+
+int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
+                                       dm_block_t threshold,
+                                       dm_sm_threshold_fn fn,
+                                       void *context)
+{
+       int r;
+
+       down_write(&pmd->root_lock);
+       r = dm_sm_register_threshold_callback(pmd->metadata_sm, threshold, fn, context);
+       up_write(&pmd->root_lock);
+
+       return r;
+}
index 0cecc3702885fb57452c013f83679fc437a393ca..845ebbe589a9e0a00505bab0150df48331233928 100644 (file)
@@ -8,6 +8,7 @@
 #define DM_THIN_METADATA_H
 
 #include "persistent-data/dm-block-manager.h"
+#include "persistent-data/dm-space-map.h"
 
 #define THIN_METADATA_BLOCK_SIZE 4096
 
@@ -185,6 +186,7 @@ int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);
  * blocks would be lost.
  */
 int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_size);
+int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_size);
 
 /*
  * Flicks the underlying block manager into read only mode, so you know
@@ -192,6 +194,11 @@ int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_size);
  */
 void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd);
 
+int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
+                                       dm_block_t threshold,
+                                       dm_sm_threshold_fn fn,
+                                       void *context);
+
 /*----------------------------------------------------------------*/
 
 #endif
index 004ad1652b73477dae0194b957842434cf48c38d..759cffc45cabce8414c883f329b52fd56513bccf 100644 (file)
@@ -922,7 +922,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
                return r;
 
        if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
-               DMWARN("%s: reached low water mark, sending event.",
+               DMWARN("%s: reached low water mark for data device: sending event.",
                       dm_device_name(pool->pool_md));
                spin_lock_irqsave(&pool->lock, flags);
                pool->low_water_triggered = 1;
@@ -1281,6 +1281,10 @@ static void process_bio_fail(struct thin_c *tc, struct bio *bio)
        bio_io_error(bio);
 }
 
+/*
+ * FIXME: should we also commit based on the size of the transaction,
+ * measured in metadata blocks?
+ */
 static int need_commit_due_to_time(struct pool *pool)
 {
        return jiffies < pool->last_commit_jiffies ||
@@ -1909,6 +1913,56 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
        return r;
 }
 
+static void metadata_low_callback(void *context)
+{
+       struct pool *pool = context;
+
+       DMWARN("%s: reached low water mark for metadata device: sending event.",
+              dm_device_name(pool->pool_md));
+
+       dm_table_event(pool->ti->table);
+}
+
+static sector_t get_metadata_dev_size(struct block_device *bdev)
+{
+       sector_t metadata_dev_size = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+       char buffer[BDEVNAME_SIZE];
+
+       if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING) {
+               DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
+                      bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
+               metadata_dev_size = THIN_METADATA_MAX_SECTORS_WARNING;
+       }
+
+       return metadata_dev_size;
+}
+
+static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev)
+{
+       sector_t metadata_dev_size = get_metadata_dev_size(bdev);
+
+       sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
+
+       return metadata_dev_size;
+}
+
+/*
+ * When a metadata threshold is crossed a dm event is triggered, and
+ * userland should respond by growing the metadata device.  We could let
+ * userland set the threshold, like we do with the data threshold, but I'm
+ * not sure they know enough to do this well.
+ */
+static dm_block_t calc_metadata_threshold(struct pool_c *pt)
+{
+       /*
+        * 4M is ample for all ops with the possible exception of thin
+        * device deletion which is harmless if it fails (just retry the
+        * delete after you've grown the device).
+        */
+       dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4;
+       return min((dm_block_t)1024ULL /* 4M */, quarter);
+}
+
 /*
  * thin-pool <metadata dev> <data dev>
  *          <data block size (sectors)>
@@ -1931,8 +1985,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
        unsigned long block_size;
        dm_block_t low_water_blocks;
        struct dm_dev *metadata_dev;
-       sector_t metadata_dev_size;
-       char b[BDEVNAME_SIZE];
+       fmode_t metadata_mode;
 
        /*
         * FIXME Remove validation from scope of lock.
@@ -1944,19 +1997,32 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
                r = -EINVAL;
                goto out_unlock;
        }
+
        as.argc = argc;
        as.argv = argv;
 
-       r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &metadata_dev);
+       /*
+        * Set default pool features.
+        */
+       pool_features_init(&pf);
+
+       dm_consume_args(&as, 4);
+       r = parse_pool_features(&as, &pf, ti);
+       if (r)
+               goto out_unlock;
+
+       metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE);
+       r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev);
        if (r) {
                ti->error = "Error opening metadata block device";
                goto out_unlock;
        }
 
-       metadata_dev_size = i_size_read(metadata_dev->bdev->bd_inode) >> SECTOR_SHIFT;
-       if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
-               DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
-                      bdevname(metadata_dev->bdev, b), THIN_METADATA_MAX_SECTORS);
+       /*
+        * Run for the side-effect of possibly issuing a warning if the
+        * device is too big.
+        */
+       (void) get_metadata_dev_size(metadata_dev->bdev);
 
        r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
        if (r) {
@@ -1979,16 +2045,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
                goto out;
        }
 
-       /*
-        * Set default pool features.
-        */
-       pool_features_init(&pf);
-
-       dm_consume_args(&as, 4);
-       r = parse_pool_features(&as, &pf, ti);
-       if (r)
-               goto out;
-
        pt = kzalloc(sizeof(*pt), GFP_KERNEL);
        if (!pt) {
                r = -ENOMEM;
@@ -2040,6 +2096,13 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
        }
        ti->private = pt;
 
+       r = dm_pool_register_metadata_threshold(pt->pool->pmd,
+                                               calc_metadata_threshold(pt),
+                                               metadata_low_callback,
+                                               pool);
+       if (r)
+               goto out_free_pt;
+
        pt->callbacks.congested_fn = pool_is_congested;
        dm_table_add_target_callbacks(ti->table, &pt->callbacks);
 
@@ -2079,18 +2142,7 @@ static int pool_map(struct dm_target *ti, struct bio *bio)
        return r;
 }
 
-/*
- * Retrieves the number of blocks of the data device from
- * the superblock and compares it to the actual device size,
- * thus resizing the data device in case it has grown.
- *
- * This both copes with opening preallocated data devices in the ctr
- * being followed by a resume
- * -and-
- * calling the resume method individually after userspace has
- * grown the data device in reaction to a table event.
- */
-static int pool_preresume(struct dm_target *ti)
+static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
 {
        int r;
        struct pool_c *pt = ti->private;
@@ -2098,12 +2150,7 @@ static int pool_preresume(struct dm_target *ti)
        sector_t data_size = ti->len;
        dm_block_t sb_data_size;
 
-       /*
-        * Take control of the pool object.
-        */
-       r = bind_control_target(pool, ti);
-       if (r)
-               return r;
+       *need_commit = false;
 
        (void) sector_div(data_size, pool->sectors_per_block);
 
@@ -2114,7 +2161,7 @@ static int pool_preresume(struct dm_target *ti)
        }
 
        if (data_size < sb_data_size) {
-               DMERR("pool target too small, is %llu blocks (expected %llu)",
+               DMERR("pool target (%llu blocks) too small: expected %llu",
                      (unsigned long long)data_size, sb_data_size);
                return -EINVAL;
 
@@ -2122,17 +2169,90 @@ static int pool_preresume(struct dm_target *ti)
                r = dm_pool_resize_data_dev(pool->pmd, data_size);
                if (r) {
                        DMERR("failed to resize data device");
-                       /* FIXME Stricter than necessary: Rollback transaction instead here */
                        set_pool_mode(pool, PM_READ_ONLY);
                        return r;
                }
 
-               (void) commit_or_fallback(pool);
+               *need_commit = true;
        }
 
        return 0;
 }
 
+static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
+{
+       int r;
+       struct pool_c *pt = ti->private;
+       struct pool *pool = pt->pool;
+       dm_block_t metadata_dev_size, sb_metadata_dev_size;
+
+       *need_commit = false;
+
+       metadata_dev_size = get_metadata_dev_size(pool->md_dev);
+
+       r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
+       if (r) {
+               DMERR("failed to retrieve metadata device size");
+               return r;
+       }
+
+       if (metadata_dev_size < sb_metadata_dev_size) {
+               DMERR("metadata device (%llu sectors) too small: expected %llu",
+                     metadata_dev_size, sb_metadata_dev_size);
+               return -EINVAL;
+
+       } else if (metadata_dev_size > sb_metadata_dev_size) {
+               r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
+               if (r) {
+                       DMERR("failed to resize metadata device");
+                       return r;
+               }
+
+               *need_commit = true;
+       }
+
+       return 0;
+}
+
+/*
+ * Retrieves the number of blocks of the data device from
+ * the superblock and compares it to the actual device size,
+ * thus resizing the data device in case it has grown.
+ *
+ * This both copes with opening preallocated data devices in the ctr
+ * being followed by a resume
+ * -and-
+ * calling the resume method individually after userspace has
+ * grown the data device in reaction to a table event.
+ */
+static int pool_preresume(struct dm_target *ti)
+{
+       int r;
+       bool need_commit1, need_commit2;
+       struct pool_c *pt = ti->private;
+       struct pool *pool = pt->pool;
+
+       /*
+        * Take control of the pool object.
+        */
+       r = bind_control_target(pool, ti);
+       if (r)
+               return r;
+
+       r = maybe_resize_data_dev(ti, &need_commit1);
+       if (r)
+               return r;
+
+       r = maybe_resize_metadata_dev(ti, &need_commit2);
+       if (r)
+               return r;
+
+       if (need_commit1 || need_commit2)
+               (void) commit_or_fallback(pool);
+
+       return 0;
+}
+
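To make the resume flow above easier to follow, here is a compact standalone model of the grow-only resize decision shared by maybe_resize_data_dev() and maybe_resize_metadata_dev(): shrinking is rejected, growth schedules a single commit afterwards. The function name and the sizes printed are illustrative only:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Model of the resize decision: compare the size recorded in the
 * superblock with the actual device size observed at resume time. */
static int maybe_resize(uint64_t actual, uint64_t recorded, bool *need_commit)
{
        *need_commit = false;

        if (actual < recorded)
                return -EINVAL;         /* device shrank: refuse */

        if (actual > recorded)
                *need_commit = true;    /* device grew: resize, commit later */

        return 0;
}

int main(void)
{
        bool need_commit1, need_commit2;

        maybe_resize(2048, 1024, &need_commit1);   /* data device grew */
        maybe_resize(512, 512, &need_commit2);     /* metadata unchanged */

        if (need_commit1 || need_commit2)
                printf("commit once for both resizes\n");
        return 0;
}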
 static void pool_resume(struct dm_target *ti)
 {
        struct pool_c *pt = ti->private;
@@ -2549,7 +2669,7 @@ static struct target_type pool_target = {
        .name = "thin-pool",
        .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
                    DM_TARGET_IMMUTABLE,
-       .version = {1, 7, 0},
+       .version = {1, 8, 0},
        .module = THIS_MODULE,
        .ctr = pool_ctr,
        .dtr = pool_dtr,
index f6d29e614ab728c521b0ebc1f8a7f320e5099deb..e735a6d5a793dfff9c71330fd7555469d5a58501 100644 (file)
@@ -248,7 +248,8 @@ static struct dm_space_map ops = {
        .new_block = sm_disk_new_block,
        .commit = sm_disk_commit,
        .root_size = sm_disk_root_size,
-       .copy_root = sm_disk_copy_root
+       .copy_root = sm_disk_copy_root,
+       .register_threshold_callback = NULL
 };
 
 struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm,
index 906cf3df71af33365e460cbca0a82b0077fc6abf..1c959684caef7512fafd3c1a5505c29c798a9c11 100644 (file)
 
 /*----------------------------------------------------------------*/
 
+/*
+ * An edge triggered threshold.
+ */
+struct threshold {
+       bool threshold_set;
+       bool value_set;
+       dm_block_t threshold;
+       dm_block_t current_value;
+       dm_sm_threshold_fn fn;
+       void *context;
+};
+
+static void threshold_init(struct threshold *t)
+{
+       t->threshold_set = false;
+       t->value_set = false;
+}
+
+static void set_threshold(struct threshold *t, dm_block_t value,
+                         dm_sm_threshold_fn fn, void *context)
+{
+       t->threshold_set = true;
+       t->threshold = value;
+       t->fn = fn;
+       t->context = context;
+}
+
+static bool below_threshold(struct threshold *t, dm_block_t value)
+{
+       return t->threshold_set && value <= t->threshold;
+}
+
+static bool threshold_already_triggered(struct threshold *t)
+{
+       return t->value_set && below_threshold(t, t->current_value);
+}
+
+static void check_threshold(struct threshold *t, dm_block_t value)
+{
+       if (below_threshold(t, value) &&
+           !threshold_already_triggered(t))
+               t->fn(t->context);
+
+       t->value_set = true;
+       t->current_value = value;
+}
+
+/*----------------------------------------------------------------*/
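A standalone userspace model of how the threshold helpers above behave: the callback fires once when the tracked value crosses below the threshold, stays silent while it remains below, and re-arms once the value rises above again. The names in this sketch are illustrative stand-ins for the kernel ones:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef void (*threshold_fn)(void *context);

struct threshold_model {
        bool threshold_set, value_set;
        uint64_t threshold, current_value;
        threshold_fn fn;
        void *context;
};

static void warn_cb(void *context)
{
        printf("low water mark reached (%s)\n", (const char *)context);
}

/* Same edge-triggered rule as check_threshold() above. */
static void check(struct threshold_model *t, uint64_t value)
{
        bool below = t->threshold_set && value <= t->threshold;
        bool already = t->value_set && t->threshold_set &&
                       t->current_value <= t->threshold;

        if (below && !already)
                t->fn(t->context);

        t->value_set = true;
        t->current_value = value;
}

int main(void)
{
        struct threshold_model t = {
                .threshold_set = true, .threshold = 100,
                .fn = warn_cb, .context = "metadata sm",
        };

        check(&t, 150);     /* above threshold: nothing */
        check(&t, 90);      /* crosses below: callback fires once */
        check(&t, 80);      /* still below: suppressed */
        check(&t, 120);     /* back above: re-arms */
        check(&t, 95);      /* crosses below again: fires again */
        return 0;
}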
+
 /*
  * Space map interface.
  *
@@ -54,6 +103,8 @@ struct sm_metadata {
        unsigned allocated_this_transaction;
        unsigned nr_uncommitted;
        struct block_op uncommitted[MAX_RECURSIVE_ALLOCATIONS];
+
+       struct threshold threshold;
 };
 
 static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b)
@@ -144,12 +195,6 @@ static void sm_metadata_destroy(struct dm_space_map *sm)
        kfree(smm);
 }
 
-static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
-{
-       DMERR("doesn't support extend");
-       return -EINVAL;
-}
-
 static int sm_metadata_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
 {
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
@@ -335,9 +380,19 @@ static int sm_metadata_new_block_(struct dm_space_map *sm, dm_block_t *b)
 
 static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b)
 {
+       dm_block_t count;
+       struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+
        int r = sm_metadata_new_block_(sm, b);
        if (r)
                DMERR("unable to allocate new metadata block");
+
+       r = sm_metadata_get_nr_free(sm, &count);
+       if (r)
+               DMERR("couldn't get free block count");
+
+       check_threshold(&smm->threshold, count);
+
        return r;
 }
 
@@ -357,6 +412,18 @@ static int sm_metadata_commit(struct dm_space_map *sm)
        return 0;
 }
 
+static int sm_metadata_register_threshold_callback(struct dm_space_map *sm,
+                                                  dm_block_t threshold,
+                                                  dm_sm_threshold_fn fn,
+                                                  void *context)
+{
+       struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+
+       set_threshold(&smm->threshold, threshold, fn, context);
+
+       return 0;
+}
+
 static int sm_metadata_root_size(struct dm_space_map *sm, size_t *result)
 {
        *result = sizeof(struct disk_sm_root);
@@ -382,6 +449,8 @@ static int sm_metadata_copy_root(struct dm_space_map *sm, void *where_le, size_t
        return 0;
 }
 
+static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks);
+
 static struct dm_space_map ops = {
        .destroy = sm_metadata_destroy,
        .extend = sm_metadata_extend,
@@ -395,7 +464,8 @@ static struct dm_space_map ops = {
        .new_block = sm_metadata_new_block,
        .commit = sm_metadata_commit,
        .root_size = sm_metadata_root_size,
-       .copy_root = sm_metadata_copy_root
+       .copy_root = sm_metadata_copy_root,
+       .register_threshold_callback = sm_metadata_register_threshold_callback
 };
 
 /*----------------------------------------------------------------*/
@@ -410,7 +480,7 @@ static void sm_bootstrap_destroy(struct dm_space_map *sm)
 
 static int sm_bootstrap_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
 {
-       DMERR("boostrap doesn't support extend");
+       DMERR("bootstrap doesn't support extend");
 
        return -EINVAL;
 }
@@ -450,7 +520,7 @@ static int sm_bootstrap_count_is_more_than_one(struct dm_space_map *sm,
 static int sm_bootstrap_set_count(struct dm_space_map *sm, dm_block_t b,
                                  uint32_t count)
 {
-       DMERR("boostrap doesn't support set_count");
+       DMERR("bootstrap doesn't support set_count");
 
        return -EINVAL;
 }
@@ -491,7 +561,7 @@ static int sm_bootstrap_commit(struct dm_space_map *sm)
 
 static int sm_bootstrap_root_size(struct dm_space_map *sm, size_t *result)
 {
-       DMERR("boostrap doesn't support root_size");
+       DMERR("bootstrap doesn't support root_size");
 
        return -EINVAL;
 }
@@ -499,7 +569,7 @@ static int sm_bootstrap_root_size(struct dm_space_map *sm, size_t *result)
 static int sm_bootstrap_copy_root(struct dm_space_map *sm, void *where,
                                  size_t max)
 {
-       DMERR("boostrap doesn't support copy_root");
+       DMERR("bootstrap doesn't support copy_root");
 
        return -EINVAL;
 }
@@ -517,11 +587,42 @@ static struct dm_space_map bootstrap_ops = {
        .new_block = sm_bootstrap_new_block,
        .commit = sm_bootstrap_commit,
        .root_size = sm_bootstrap_root_size,
-       .copy_root = sm_bootstrap_copy_root
+       .copy_root = sm_bootstrap_copy_root,
+       .register_threshold_callback = NULL
 };
 
 /*----------------------------------------------------------------*/
 
+static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
+{
+       int r, i;
+       enum allocation_event ev;
+       struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+       dm_block_t old_len = smm->ll.nr_blocks;
+
+       /*
+        * Flick into a mode where all blocks get allocated in the new area.
+        */
+       smm->begin = old_len;
+       memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
+
+       /*
+        * Extend.
+        */
+       r = sm_ll_extend(&smm->ll, extra_blocks);
+
+       /*
+        * Switch back to normal behaviour.
+        */
+       memcpy(&smm->sm, &ops, sizeof(smm->sm));
+       for (i = old_len; !r && i < smm->begin; i++)
+               r = sm_ll_inc(&smm->ll, i, &ev);
+
+       return r;
+}
+
+/*----------------------------------------------------------------*/
+
 struct dm_space_map *dm_sm_metadata_init(void)
 {
        struct sm_metadata *smm;
@@ -549,6 +650,7 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
        smm->recursion_count = 0;
        smm->allocated_this_transaction = 0;
        smm->nr_uncommitted = 0;
+       threshold_init(&smm->threshold);
 
        memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
 
@@ -590,6 +692,7 @@ int dm_sm_metadata_open(struct dm_space_map *sm,
        smm->recursion_count = 0;
        smm->allocated_this_transaction = 0;
        smm->nr_uncommitted = 0;
+       threshold_init(&smm->threshold);
 
        memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));
        return 0;
index 1cbfc6b1638a05a42535fe80c4e76afa0e39a3b0..3e6d1153b7c4b898b5a1355ad8bcc43cac2de4f4 100644 (file)
@@ -9,6 +9,8 @@
 
 #include "dm-block-manager.h"
 
+typedef void (*dm_sm_threshold_fn)(void *context);
+
 /*
  * struct dm_space_map keeps a record of how many times each block in a device
  * is referenced.  It needs to be fixed on disk as part of the transaction.
@@ -59,6 +61,15 @@ struct dm_space_map {
         */
        int (*root_size)(struct dm_space_map *sm, size_t *result);
        int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
+
+       /*
+        * You can register one threshold callback which is edge-triggered
+        * when the free space in the space map drops below the threshold.
+        */
+       int (*register_threshold_callback)(struct dm_space_map *sm,
+                                          dm_block_t threshold,
+                                          dm_sm_threshold_fn fn,
+                                          void *context);
 };
 
 /*----------------------------------------------------------------*/
@@ -131,4 +142,16 @@ static inline int dm_sm_copy_root(struct dm_space_map *sm, void *copy_to_here_le
        return sm->copy_root(sm, copy_to_here_le, len);
 }
 
+static inline int dm_sm_register_threshold_callback(struct dm_space_map *sm,
+                                                   dm_block_t threshold,
+                                                   dm_sm_threshold_fn fn,
+                                                   void *context)
+{
+       if (sm->register_threshold_callback)
+               return sm->register_threshold_callback(sm, threshold, fn, context);
+
+       return -EINVAL;
+}
+
+
 #endif /* _LINUX_DM_SPACE_MAP_H */
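The interface change above makes the threshold callback optional per space-map implementation: the disk and bootstrap maps leave the new member NULL, and dm_sm_register_threshold_callback() turns that into -EINVAL for the caller. A minimal standalone illustration of this optional-callback dispatch pattern (types and names are simplified stand-ins, not the kernel ones):

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef void (*sm_threshold_fn)(void *context);

struct space_map_ops {
        /* optional: NULL means thresholds are unsupported (disk/bootstrap) */
        int (*register_threshold_callback)(uint64_t threshold,
                                           sm_threshold_fn fn, void *context);
};

static int register_threshold(struct space_map_ops *sm, uint64_t threshold,
                              sm_threshold_fn fn, void *context)
{
        if (sm->register_threshold_callback)
                return sm->register_threshold_callback(threshold, fn, context);

        return -EINVAL;
}

int main(void)
{
        struct space_map_ops disk_like = { .register_threshold_callback = NULL };

        /* prints -22 (-EINVAL): this map cannot report thresholds */
        printf("disk-like map: %d\n",
               register_threshold(&disk_like, 1024, NULL, NULL));
        return 0;
}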
index f8f0156dff4e49b6fbe9fd1133055ad829f25f1b..200020eb30059d73767a8cda2540b4f9af9326cf 100644 (file)
@@ -280,5 +280,6 @@ source "drivers/net/wireless/rtlwifi/Kconfig"
 source "drivers/net/wireless/ti/Kconfig"
 source "drivers/net/wireless/zd1211rw/Kconfig"
 source "drivers/net/wireless/mwifiex/Kconfig"
+source "drivers/net/wireless/cw1200/Kconfig"
 
 endif # WLAN
index 67156efe14c4217f887e8bbb2be988e10945c122..0fab227025be33bbd399fd29b3ea2693a6d21000 100644 (file)
@@ -57,3 +57,5 @@ obj-$(CONFIG_MWIFIEX) += mwifiex/
 
 obj-$(CONFIG_BRCMFMAC) += brcm80211/
 obj-$(CONFIG_BRCMSMAC) += brcm80211/
+
+obj-$(CONFIG_CW1200)   += cw1200/
index 2c02b4e84094d32fc5605deaebb0ed148087fbf5..1abf1d421173085e72a4bfa549fd147f83a87ca5 100644 (file)
@@ -31,5 +31,6 @@ source "drivers/net/wireless/ath/carl9170/Kconfig"
 source "drivers/net/wireless/ath/ath6kl/Kconfig"
 source "drivers/net/wireless/ath/ar5523/Kconfig"
 source "drivers/net/wireless/ath/wil6210/Kconfig"
+source "drivers/net/wireless/ath/ath10k/Kconfig"
 
 endif
index 97b964ded2bef25e4218cc3843377ada32732077..fb05cfd193616ea65cecbfc489653409b173e617 100644 (file)
@@ -4,6 +4,7 @@ obj-$(CONFIG_CARL9170)          += carl9170/
 obj-$(CONFIG_ATH6KL)           += ath6kl/
 obj-$(CONFIG_AR5523)           += ar5523/
 obj-$(CONFIG_WIL6210)          += wil6210/
+obj-$(CONFIG_ATH10K)           += ath10k/
 
 obj-$(CONFIG_ATH_COMMON)       += ath.o
 
index 4521342c62cc37654ee1889b9ee177e395706f27..daeafeff186bb4f0ed313d8f6a7e8661fe0267ef 100644 (file)
@@ -239,13 +239,12 @@ enum ATH_DEBUG {
        ATH_DBG_CONFIG          = 0x00000200,
        ATH_DBG_FATAL           = 0x00000400,
        ATH_DBG_PS              = 0x00000800,
-       ATH_DBG_HWTIMER         = 0x00001000,
-       ATH_DBG_BTCOEX          = 0x00002000,
-       ATH_DBG_WMI             = 0x00004000,
-       ATH_DBG_BSTUCK          = 0x00008000,
-       ATH_DBG_MCI             = 0x00010000,
-       ATH_DBG_DFS             = 0x00020000,
-       ATH_DBG_WOW             = 0x00040000,
+       ATH_DBG_BTCOEX          = 0x00001000,
+       ATH_DBG_WMI             = 0x00002000,
+       ATH_DBG_BSTUCK          = 0x00004000,
+       ATH_DBG_MCI             = 0x00008000,
+       ATH_DBG_DFS             = 0x00010000,
+       ATH_DBG_WOW             = 0x00020000,
        ATH_DBG_ANY             = 0xffffffff
 };
 
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
new file mode 100644 (file)
index 0000000..cde58fe
--- /dev/null
@@ -0,0 +1,39 @@
+config ATH10K
+        tristate "Atheros 802.11ac wireless cards support"
+        depends on MAC80211
+       select ATH_COMMON
+        ---help---
+          This module adds support for wireless adapters based on
+          the Atheros IEEE 802.11ac family of chipsets.
+
+          If you choose to build a module, it'll be called ath10k.
+
+config ATH10K_PCI
+       tristate "Atheros ath10k PCI support"
+       depends on ATH10K && PCI
+       ---help---
+         This module adds support for the PCIe bus.
+
+config ATH10K_DEBUG
+       bool "Atheros ath10k debugging"
+       depends on ATH10K
+       ---help---
+         Enables debug support
+
+         If unsure, say Y to make it easier to debug problems.
+
+config ATH10K_DEBUGFS
+       bool "Atheros ath10k debugfs support"
+       depends on ATH10K
+       ---help---
+         Enables debugfs support.
+
+         If unsure, say Y to make it easier to debug problems.
+
+config ATH10K_TRACING
+       bool "Atheros ath10k tracing support"
+       depends on ATH10K
+       depends on EVENT_TRACING
+       ---help---
+         Select this to let ath10k use the tracing infrastructure.
+
diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile
new file mode 100644 (file)
index 0000000..a4179f4
--- /dev/null
@@ -0,0 +1,20 @@
+obj-$(CONFIG_ATH10K) += ath10k_core.o
+ath10k_core-y += mac.o \
+                debug.o \
+                core.o \
+                htc.o \
+                htt.o \
+                htt_rx.o \
+                htt_tx.o \
+                txrx.o \
+                wmi.o \
+                bmi.o
+
+ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o
+
+obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
+ath10k_pci-y += pci.o \
+               ce.o
+
+# for tracing framework to find trace.h
+CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
new file mode 100644 (file)
index 0000000..1a2ef51
--- /dev/null
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "bmi.h"
+#include "hif.h"
+#include "debug.h"
+#include "htc.h"
+
+int ath10k_bmi_done(struct ath10k *ar)
+{
+       struct bmi_cmd cmd;
+       u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
+       int ret;
+
+       if (ar->bmi.done_sent) {
+               ath10k_dbg(ATH10K_DBG_CORE, "%s skipped\n", __func__);
+               return 0;
+       }
+
+       ar->bmi.done_sent = true;
+       cmd.id = __cpu_to_le32(BMI_DONE);
+
+       ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
+       if (ret) {
+               ath10k_warn("unable to write to the device: %d\n", ret);
+               return ret;
+       }
+
+       ath10k_dbg(ATH10K_DBG_CORE, "BMI done\n");
+       return 0;
+}
+
+int ath10k_bmi_get_target_info(struct ath10k *ar,
+                              struct bmi_target_info *target_info)
+{
+       struct bmi_cmd cmd;
+       union bmi_resp resp;
+       u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
+       u32 resplen = sizeof(resp.get_target_info);
+       int ret;
+
+       if (ar->bmi.done_sent) {
+               ath10k_warn("BMI Get Target Info Command disallowed\n");
+               return -EBUSY;
+       }
+
+       cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);
+
+       ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
+       if (ret) {
+               ath10k_warn("unable to get target info from device\n");
+               return ret;
+       }
+
+       if (resplen < sizeof(resp.get_target_info)) {
+               ath10k_warn("invalid get_target_info response length (%d)\n",
+                           resplen);
+               return -EIO;
+       }
+
+       target_info->version = __le32_to_cpu(resp.get_target_info.version);
+       target_info->type    = __le32_to_cpu(resp.get_target_info.type);
+       return 0;
+}
+
+int ath10k_bmi_read_memory(struct ath10k *ar,
+                          u32 address, void *buffer, u32 length)
+{
+       struct bmi_cmd cmd;
+       union bmi_resp resp;
+       u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_mem);
+       u32 rxlen;
+       int ret;
+
+       if (ar->bmi.done_sent) {
+               ath10k_warn("command disallowed\n");
+               return -EBUSY;
+       }
+
+       ath10k_dbg(ATH10K_DBG_CORE,
+                  "%s: (device: 0x%p, address: 0x%x, length: %d)\n",
+                  __func__, ar, address, length);
+
+       while (length) {
+               rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE);
+
+               cmd.id            = __cpu_to_le32(BMI_READ_MEMORY);
+               cmd.read_mem.addr = __cpu_to_le32(address);
+               cmd.read_mem.len  = __cpu_to_le32(rxlen);
+
+               ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
+                                                 &resp, &rxlen);
+               if (ret) {
+                       ath10k_warn("unable to read from the device\n");
+                       return ret;
+               }
+
+               memcpy(buffer, resp.read_mem.payload, rxlen);
+               address += rxlen;
+               buffer  += rxlen;
+               length  -= rxlen;
+       }
+
+       return 0;
+}
+
+int ath10k_bmi_write_memory(struct ath10k *ar,
+                           u32 address, const void *buffer, u32 length)
+{
+       struct bmi_cmd cmd;
+       u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.write_mem);
+       u32 txlen;
+       int ret;
+
+       if (ar->bmi.done_sent) {
+               ath10k_warn("command disallowed\n");
+               return -EBUSY;
+       }
+
+       ath10k_dbg(ATH10K_DBG_CORE,
+                  "%s: (device: 0x%p, address: 0x%x, length: %d)\n",
+                  __func__, ar, address, length);
+
+       while (length) {
+               txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);
+
+               /* copy before roundup to avoid reading beyond buffer */
+               memcpy(cmd.write_mem.payload, buffer, txlen);
+               txlen = roundup(txlen, 4);
+
+               cmd.id             = __cpu_to_le32(BMI_WRITE_MEMORY);
+               cmd.write_mem.addr = __cpu_to_le32(address);
+               cmd.write_mem.len  = __cpu_to_le32(txlen);
+
+               ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
+                                                 NULL, NULL);
+               if (ret) {
+                       ath10k_warn("unable to write to the device\n");
+                       return ret;
+               }
+
+               /* fixup roundup() so `length` zeroes out for last chunk */
+               txlen = min(txlen, length);
+
+               address += txlen;
+               buffer  += txlen;
+               length  -= txlen;
+       }
+
+       return 0;
+}
+
+int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
+{
+       struct bmi_cmd cmd;
+       union bmi_resp resp;
+       u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.execute);
+       u32 resplen = sizeof(resp.execute);
+       int ret;
+
+       if (ar->bmi.done_sent) {
+               ath10k_warn("command disallowed\n");
+               return -EBUSY;
+       }
+
+       ath10k_dbg(ATH10K_DBG_CORE,
+                  "%s: (device: 0x%p, address: 0x%x, param: %d)\n",
+                  __func__, ar, address, *param);
+
+       cmd.id            = __cpu_to_le32(BMI_EXECUTE);
+       cmd.execute.addr  = __cpu_to_le32(address);
+       cmd.execute.param = __cpu_to_le32(*param);
+
+       ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
+       if (ret) {
+               ath10k_warn("unable to read from the device\n");
+               return ret;
+       }
+
+       if (resplen < sizeof(resp.execute)) {
+               ath10k_warn("invalid execute response length (%d)\n",
+                           resplen);
+               return -EIO;
+       }
+
+       *param = __le32_to_cpu(resp.execute.result);
+       return 0;
+}
+
+int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
+{
+       struct bmi_cmd cmd;
+       u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.lz_data);
+       u32 txlen;
+       int ret;
+
+       if (ar->bmi.done_sent) {
+               ath10k_warn("command disallowed\n");
+               return -EBUSY;
+       }
+
+       while (length) {
+               txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);
+
+               WARN_ON_ONCE(txlen & 3);
+
+               cmd.id          = __cpu_to_le32(BMI_LZ_DATA);
+               cmd.lz_data.len = __cpu_to_le32(txlen);
+               memcpy(cmd.lz_data.payload, buffer, txlen);
+
+               ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
+                                                 NULL, NULL);
+               if (ret) {
+                       ath10k_warn("unable to write to the device\n");
+                       return ret;
+               }
+
+               buffer += txlen;
+               length -= txlen;
+       }
+
+       return 0;
+}
+
+int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
+{
+       struct bmi_cmd cmd;
+       u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
+       int ret;
+
+       if (ar->bmi.done_sent) {
+               ath10k_warn("command disallowed\n");
+               return -EBUSY;
+       }
+
+       cmd.id            = __cpu_to_le32(BMI_LZ_STREAM_START);
+       cmd.lz_start.addr = __cpu_to_le32(address);
+
+       ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
+       if (ret) {
+               ath10k_warn("unable to start LZ stream to the device\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+int ath10k_bmi_fast_download(struct ath10k *ar,
+                            u32 address, const void *buffer, u32 length)
+{
+       u8 trailer[4] = {};
+       u32 head_len = rounddown(length, 4);
+       u32 trailer_len = length - head_len;
+       int ret;
+
+       ret = ath10k_bmi_lz_stream_start(ar, address);
+       if (ret)
+               return ret;
+
+       /* copy the last word into a zero padded buffer */
+       if (trailer_len > 0)
+               memcpy(trailer, buffer + head_len, trailer_len);
+
+       ret = ath10k_bmi_lz_data(ar, buffer, head_len);
+       if (ret)
+               return ret;
+
+       if (trailer_len > 0)
+               ret = ath10k_bmi_lz_data(ar, trailer, 4);
+
+       if (ret != 0)
+               return ret;
+
+       /*
+        * Close compressed stream and open a new (fake) one.
+        * This serves mainly to flush Target caches.
+        */
+       ret = ath10k_bmi_lz_stream_start(ar, 0x00);
+
+       return ret;
+}
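ath10k_bmi_fast_download() above streams the largest 4-byte-aligned prefix of the image and then a zero-padded 4-byte trailer holding the remaining bytes, matching the WARN_ON_ONCE(txlen & 3) alignment check in ath10k_bmi_lz_data(). A standalone sketch of just that split (the sample buffer is illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Model of the head/trailer split: the head is the largest 4-byte-aligned
 * prefix, the remaining 1-3 bytes go into a zero-padded 4-byte trailer. */
int main(void)
{
        const uint8_t image[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
        uint32_t length = sizeof(image);
        uint32_t head_len = length & ~3u;        /* rounddown(length, 4) */
        uint32_t trailer_len = length - head_len;
        uint8_t trailer[4] = { 0 };

        if (trailer_len > 0)
                memcpy(trailer, image + head_len, trailer_len);

        printf("head: %u bytes, trailer: %u byte(s) padded to 4\n",
               head_len, trailer_len);
        return 0;
}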
diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h
new file mode 100644 (file)
index 0000000..32c56aa
--- /dev/null
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _BMI_H_
+#define _BMI_H_
+
+#include "core.h"
+
+/*
+ * Bootloader Messaging Interface (BMI)
+ *
+ * BMI is a very simple messaging interface used during initialization
+ * to read memory, write memory, execute code, and to define an
+ * application entry PC.
+ *
+ * It is used to download an application to QCA988x, to provide
+ * patches to code that is already resident on QCA988x, and generally
+ * to examine and modify state.  The Host has an opportunity to use
+ * BMI only once during bootup.  Once the Host issues a BMI_DONE
+ * command, this opportunity ends.
+ *
+ * The Host writes BMI requests to mailbox0, and reads BMI responses
+ * from mailbox0.   BMI requests all begin with a command
+ * (see below for specific commands), and are followed by
+ * command-specific data.
+ *
+ * Flow control:
+ * The Host can only issue a command once the Target gives it a
+ * "BMI Command Credit", using AR8K Counter #4.  As soon as the
+ * Target has completed a command, it issues another BMI Command
+ * Credit (so the Host can issue the next command).
+ *
+ * BMI handles all required Target-side cache flushing.
+ */
+
+/* Maximum data size used for BMI transfers */
+#define BMI_MAX_DATA_SIZE      256
+
+/* len = cmd + addr + length */
+#define BMI_MAX_CMDBUF_SIZE (BMI_MAX_DATA_SIZE + \
+                       sizeof(u32) + \
+                       sizeof(u32) + \
+                       sizeof(u32))
+
+/* BMI Commands */
+
+enum bmi_cmd_id {
+       BMI_NO_COMMAND          = 0,
+       BMI_DONE                = 1,
+       BMI_READ_MEMORY         = 2,
+       BMI_WRITE_MEMORY        = 3,
+       BMI_EXECUTE             = 4,
+       BMI_SET_APP_START       = 5,
+       BMI_READ_SOC_REGISTER   = 6,
+       BMI_READ_SOC_WORD       = 6,
+       BMI_WRITE_SOC_REGISTER  = 7,
+       BMI_WRITE_SOC_WORD      = 7,
+       BMI_GET_TARGET_ID       = 8,
+       BMI_GET_TARGET_INFO     = 8,
+       BMI_ROMPATCH_INSTALL    = 9,
+       BMI_ROMPATCH_UNINSTALL  = 10,
+       BMI_ROMPATCH_ACTIVATE   = 11,
+       BMI_ROMPATCH_DEACTIVATE = 12,
+       BMI_LZ_STREAM_START     = 13, /* should be followed by LZ_DATA */
+       BMI_LZ_DATA             = 14,
+       BMI_NVRAM_PROCESS       = 15,
+};
+
+#define BMI_NVRAM_SEG_NAME_SZ 16
+
+struct bmi_cmd {
+       __le32 id; /* enum bmi_cmd_id */
+       union {
+               struct {
+               } done;
+               struct {
+                       __le32 addr;
+                       __le32 len;
+               } read_mem;
+               struct {
+                       __le32 addr;
+                       __le32 len;
+                       u8 payload[0];
+               } write_mem;
+               struct {
+                       __le32 addr;
+                       __le32 param;
+               } execute;
+               struct {
+                       __le32 addr;
+               } set_app_start;
+               struct {
+                       __le32 addr;
+               } read_soc_reg;
+               struct {
+                       __le32 addr;
+                       __le32 value;
+               } write_soc_reg;
+               struct {
+               } get_target_info;
+               struct {
+                       __le32 rom_addr;
+                       __le32 ram_addr; /* or value */
+                       __le32 size;
+                       __le32 activate; /* 0=install, but don't activate */
+               } rompatch_install;
+               struct {
+                       __le32 patch_id;
+               } rompatch_uninstall;
+               struct {
+                       __le32 count;
+                       __le32 patch_ids[0]; /* length of @count */
+               } rompatch_activate;
+               struct {
+                       __le32 count;
+                       __le32 patch_ids[0]; /* length of @count */
+               } rompatch_deactivate;
+               struct {
+                       __le32 addr;
+               } lz_start;
+               struct {
+                       __le32 len; /* max BMI_MAX_DATA_SIZE */
+                       u8 payload[0]; /* length of @len */
+               } lz_data;
+               struct {
+                       u8 name[BMI_NVRAM_SEG_NAME_SZ];
+               } nvram_process;
+               u8 payload[BMI_MAX_CMDBUF_SIZE];
+       };
+} __packed;
+
+union bmi_resp {
+       struct {
+               u8 payload[0];
+       } read_mem;
+       struct {
+               __le32 result;
+       } execute;
+       struct {
+               __le32 value;
+       } read_soc_reg;
+       struct {
+               __le32 len;
+               __le32 version;
+               __le32 type;
+       } get_target_info;
+       struct {
+               __le32 patch_id;
+       } rompatch_install;
+       struct {
+               __le32 patch_id;
+       } rompatch_uninstall;
+       struct {
+               /* 0 = nothing executed
+                * otherwise = NVRAM segment return value */
+               __le32 result;
+       } nvram_process;
+       u8 payload[BMI_MAX_CMDBUF_SIZE];
+} __packed;
+
+struct bmi_target_info {
+       u32 version;
+       u32 type;
+};
+
+
+/* in msec */
+#define BMI_COMMUNICATION_TIMEOUT_HZ (1*HZ)
+
+#define BMI_CE_NUM_TO_TARG 0
+#define BMI_CE_NUM_TO_HOST 1
+
+int ath10k_bmi_done(struct ath10k *ar);
+int ath10k_bmi_get_target_info(struct ath10k *ar,
+                              struct bmi_target_info *target_info);
+int ath10k_bmi_read_memory(struct ath10k *ar, u32 address,
+                          void *buffer, u32 length);
+int ath10k_bmi_write_memory(struct ath10k *ar, u32 address,
+                           const void *buffer, u32 length);
+
+#define ath10k_bmi_read32(ar, item, val)                               \
+       ({                                                              \
+               int ret;                                                \
+               u32 addr;                                               \
+               __le32 tmp;                                             \
+                                                                       \
+               addr = host_interest_item_address(HI_ITEM(item));       \
+               ret = ath10k_bmi_read_memory(ar, addr, (u8 *)&tmp, 4); \
+               *val = __le32_to_cpu(tmp);                              \
+               ret;                                                    \
+        })
+
+#define ath10k_bmi_write32(ar, item, val)                              \
+       ({                                                              \
+               int ret;                                                \
+               u32 address;                                            \
+               __le32 v = __cpu_to_le32(val);                          \
+                                                                       \
+               address = host_interest_item_address(HI_ITEM(item));    \
+               ret = ath10k_bmi_write_memory(ar, address,              \
+                                             (u8 *)&v, sizeof(v));     \
+               ret;                                                    \
+       })
+
+int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param);
+int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address);
+int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length);
+int ath10k_bmi_fast_download(struct ath10k *ar, u32 address,
+                            const void *buffer, u32 length);
+#endif /* _BMI_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
new file mode 100644 (file)
index 0000000..61a8ac7
--- /dev/null
@@ -0,0 +1,1189 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hif.h"
+#include "pci.h"
+#include "ce.h"
+#include "debug.h"
+
+/*
+ * Support for Copy Engine hardware, which is mainly used for
+ * communication between Host and Target over a PCIe interconnect.
+ */
+
+/*
+ * A single CopyEngine (CE) comprises two "rings":
+ *   a source ring
+ *   a destination ring
+ *
+ * Each ring consists of a number of descriptors which specify
+ * an address, length, and meta-data.
+ *
+ * Typically, one side of the PCIe interconnect (Host or Target)
+ * controls one ring and the other side controls the other ring.
+ * The source side chooses when to initiate a transfer and it
+ * chooses what to send (buffer address, length). The destination
+ * side keeps a supply of "anonymous receive buffers" available and
+ * it handles incoming data as it arrives (when the destination
+ * receives an interrupt).
+ *
+ * The sender may send a simple buffer (address/length) or it may
+ * send a small list of buffers.  When a small list is sent, hardware
+ * "gathers" these and they end up in a single destination buffer
+ * with a single interrupt.
+ *
+ * There are several "contexts" managed by this layer -- more, it
+ * may seem -- than should be needed. These are provided mainly for
+ * maximum flexibility and especially to facilitate a simpler HIF
+ * implementation. There are per-CopyEngine recv, send, and watermark
+ * contexts. These are supplied by the caller when a recv, send,
+ * or watermark handler is established and they are echoed back to
+ * the caller when the respective callbacks are invoked. There is
+ * also a per-transfer context supplied by the caller when a buffer
+ * (or sendlist) is sent and when a buffer is enqueued for recv.
+ * These per-transfer contexts are echoed back to the caller when
+ * the buffer is sent/received.
+ */
+
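The send and receive paths later in this file gate on CE_RING_DELTA() for free descriptors and advance indices with CE_RING_IDX_INCR(); those macros are defined elsewhere (not in this hunk), so the standalone model below assumes the conventional masked power-of-two index arithmetic, with one descriptor kept unused to tell a full ring from an empty one:

#include <stdio.h>

/* Assumed definitions of the ring helpers used in this file (not shown in
 * the patch): indices live in [0, nentries) with nentries a power of two. */
#define CE_RING_DELTA(mask, fromidx, toidx) (((toidx) - (fromidx)) & (mask))
#define CE_RING_IDX_INCR(mask, idx)         (((idx) + 1) & (mask))

int main(void)
{
        unsigned int nentries = 8, mask = nentries - 1;
        unsigned int sw_index = 5, write_index = 5;   /* empty ring */

        /* Free slots between producer and consumer, as in the send path:
         * an empty 8-entry ring reports 7 free descriptors because one
         * slot is reserved, and the ring counts as full when this hits 0. */
        unsigned int room = CE_RING_DELTA(mask, write_index, sw_index - 1);

        printf("free descriptors: %u\n", room);

        write_index = CE_RING_IDX_INCR(mask, write_index);
        printf("next write index: %u\n", write_index);
        return 0;
}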
+static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar,
+                                                      u32 ce_ctrl_addr,
+                                                      unsigned int n)
+{
+       ath10k_pci_write32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS, n);
+}
+
+static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
+                                                     u32 ce_ctrl_addr)
+{
+       return ath10k_pci_read32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS);
+}
+
+static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
+                                                     u32 ce_ctrl_addr,
+                                                     unsigned int n)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       void __iomem *indicator_addr;
+
+       if (!test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features)) {
+               ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
+               return;
+       }
+
+       /* workaround for QCA988x_1.0 HW CE */
+       indicator_addr = ar_pci->mem + ce_ctrl_addr + DST_WATERMARK_ADDRESS;
+
+       if (ce_ctrl_addr == ath10k_ce_base_address(CDC_WAR_DATA_CE)) {
+               iowrite32((CDC_WAR_MAGIC_STR | n), indicator_addr);
+       } else {
+               unsigned long irq_flags;
+               local_irq_save(irq_flags);
+               iowrite32(1, indicator_addr);
+
+               /*
+                * PCIE write waits for ACK in IPQ8K, there is no
+                * need to read back value.
+                */
+               (void)ioread32(indicator_addr);
+               (void)ioread32(indicator_addr); /* conservative */
+
+               ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
+
+               iowrite32(0, indicator_addr);
+               local_irq_restore(irq_flags);
+       }
+}
+
+static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
+                                                    u32 ce_ctrl_addr)
+{
+       return ath10k_pci_read32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS);
+}
+
+static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
+                                                   u32 ce_ctrl_addr)
+{
+       return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_SRRI_ADDRESS);
+}
+
+static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
+                                                   u32 ce_ctrl_addr,
+                                                   unsigned int addr)
+{
+       ath10k_pci_write32(ar, ce_ctrl_addr + SR_BA_ADDRESS, addr);
+}
+
+static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
+                                              u32 ce_ctrl_addr,
+                                              unsigned int n)
+{
+       ath10k_pci_write32(ar, ce_ctrl_addr + SR_SIZE_ADDRESS, n);
+}
+
+static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
+                                              u32 ce_ctrl_addr,
+                                              unsigned int n)
+{
+       u32 ctrl1_addr = ath10k_pci_read32((ar),
+                                          (ce_ctrl_addr) + CE_CTRL1_ADDRESS);
+
+       ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
+                          (ctrl1_addr &  ~CE_CTRL1_DMAX_LENGTH_MASK) |
+                          CE_CTRL1_DMAX_LENGTH_SET(n));
+}
+
+static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
+                                                   u32 ce_ctrl_addr,
+                                                   unsigned int n)
+{
+       u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);
+
+       ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
+                          (ctrl1_addr & ~CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) |
+                          CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(n));
+}
+
+static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
+                                                    u32 ce_ctrl_addr,
+                                                    unsigned int n)
+{
+       u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);
+
+       ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
+                          (ctrl1_addr & ~CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) |
+                          CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(n));
+}
+
+static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
+                                                    u32 ce_ctrl_addr)
+{
+       return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_DRRI_ADDRESS);
+}
+
+static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
+                                                    u32 ce_ctrl_addr,
+                                                    u32 addr)
+{
+       ath10k_pci_write32(ar, ce_ctrl_addr + DR_BA_ADDRESS, addr);
+}
+
+static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
+                                               u32 ce_ctrl_addr,
+                                               unsigned int n)
+{
+       ath10k_pci_write32(ar, ce_ctrl_addr + DR_SIZE_ADDRESS, n);
+}
+
+static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
+                                                  u32 ce_ctrl_addr,
+                                                  unsigned int n)
+{
+       u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);
+
+       ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
+                          (addr & ~SRC_WATERMARK_HIGH_MASK) |
+                          SRC_WATERMARK_HIGH_SET(n));
+}
+
+static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
+                                                 u32 ce_ctrl_addr,
+                                                 unsigned int n)
+{
+       u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);
+
+       ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
+                          (addr & ~SRC_WATERMARK_LOW_MASK) |
+                          SRC_WATERMARK_LOW_SET(n));
+}
+
+static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
+                                                   u32 ce_ctrl_addr,
+                                                   unsigned int n)
+{
+       u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);
+
+       ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
+                          (addr & ~DST_WATERMARK_HIGH_MASK) |
+                          DST_WATERMARK_HIGH_SET(n));
+}
+
+static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
+                                                  u32 ce_ctrl_addr,
+                                                  unsigned int n)
+{
+       u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);
+
+       ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
+                          (addr & ~DST_WATERMARK_LOW_MASK) |
+                          DST_WATERMARK_LOW_SET(n));
+}
+
+static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar,
+                                                       u32 ce_ctrl_addr)
+{
+       u32 host_ie_addr = ath10k_pci_read32(ar,
+                                            ce_ctrl_addr + HOST_IE_ADDRESS);
+
+       ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
+                          host_ie_addr | HOST_IE_COPY_COMPLETE_MASK);
+}
+
+static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
+                                                       u32 ce_ctrl_addr)
+{
+       u32 host_ie_addr = ath10k_pci_read32(ar,
+                                            ce_ctrl_addr + HOST_IE_ADDRESS);
+
+       ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
+                          host_ie_addr & ~HOST_IE_COPY_COMPLETE_MASK);
+}
+
+static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
+                                                   u32 ce_ctrl_addr)
+{
+       u32 host_ie_addr = ath10k_pci_read32(ar,
+                                            ce_ctrl_addr + HOST_IE_ADDRESS);
+
+       ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
+                          host_ie_addr & ~CE_WATERMARK_MASK);
+}
+
+static inline void ath10k_ce_error_intr_enable(struct ath10k *ar,
+                                              u32 ce_ctrl_addr)
+{
+       u32 misc_ie_addr = ath10k_pci_read32(ar,
+                                            ce_ctrl_addr + MISC_IE_ADDRESS);
+
+       ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
+                          misc_ie_addr | CE_ERROR_MASK);
+}
+
+static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
+                                                    u32 ce_ctrl_addr,
+                                                    unsigned int mask)
+{
+       ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IS_ADDRESS, mask);
+}
+
+
+/*
+ * Guts of ath10k_ce_send, used by both ath10k_ce_send and
+ * ath10k_ce_sendlist_send.
+ * The caller takes responsibility for any needed locking.
+ */
+static int ath10k_ce_send_nolock(struct ce_state *ce_state,
+                                void *per_transfer_context,
+                                u32 buffer,
+                                unsigned int nbytes,
+                                unsigned int transfer_id,
+                                unsigned int flags)
+{
+       struct ath10k *ar = ce_state->ar;
+       struct ce_ring_state *src_ring = ce_state->src_ring;
+       struct ce_desc *desc, *sdesc;
+       unsigned int nentries_mask = src_ring->nentries_mask;
+       unsigned int sw_index = src_ring->sw_index;
+       unsigned int write_index = src_ring->write_index;
+       u32 ctrl_addr = ce_state->ctrl_addr;
+       u32 desc_flags = 0;
+       int ret = 0;
+
+       if (nbytes > ce_state->src_sz_max)
+               ath10k_warn("%s: sending more than we can (nbytes: %d, max: %d)\n",
+                           __func__, nbytes, ce_state->src_sz_max);
+
+       ath10k_pci_wake(ar);
+
+       if (unlikely(CE_RING_DELTA(nentries_mask,
+                                  write_index, sw_index - 1) <= 0)) {
+               ret = -EIO;
+               goto exit;
+       }
+
+       desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
+                                  write_index);
+       sdesc = CE_SRC_RING_TO_DESC(src_ring->shadow_base, write_index);
+
+       desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);
+
+       if (flags & CE_SEND_FLAG_GATHER)
+               desc_flags |= CE_DESC_FLAGS_GATHER;
+       if (flags & CE_SEND_FLAG_BYTE_SWAP)
+               desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;
+
+       sdesc->addr   = __cpu_to_le32(buffer);
+       sdesc->nbytes = __cpu_to_le16(nbytes);
+       sdesc->flags  = __cpu_to_le16(desc_flags);
+
+       *desc = *sdesc;
+
+       src_ring->per_transfer_context[write_index] = per_transfer_context;
+
+       /* Update Source Ring Write Index */
+       write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+
+       /* WORKAROUND */
+       if (!(flags & CE_SEND_FLAG_GATHER))
+               ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);
+
+       src_ring->write_index = write_index;
+exit:
+       ath10k_pci_sleep(ar);
+       return ret;
+}
+
+int ath10k_ce_send(struct ce_state *ce_state,
+                  void *per_transfer_context,
+                  u32 buffer,
+                  unsigned int nbytes,
+                  unsigned int transfer_id,
+                  unsigned int flags)
+{
+       struct ath10k *ar = ce_state->ar;
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int ret;
+
+       spin_lock_bh(&ar_pci->ce_lock);
+       ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
+                                   buffer, nbytes, transfer_id, flags);
+       spin_unlock_bh(&ar_pci->ce_lock);
+
+       return ret;
+}
+
+void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist, u32 buffer,
+                               unsigned int nbytes, u32 flags)
+{
+       unsigned int num_items = sendlist->num_items;
+       struct ce_sendlist_item *item;
+
+       item = &sendlist->item[num_items];
+       item->data = buffer;
+       item->u.nbytes = nbytes;
+       item->flags = flags;
+       sendlist->num_items++;
+}
+
+int ath10k_ce_sendlist_send(struct ce_state *ce_state,
+                           void *per_transfer_context,
+                           struct ce_sendlist *sendlist,
+                           unsigned int transfer_id)
+{
+       struct ce_ring_state *src_ring = ce_state->src_ring;
+       struct ce_sendlist_item *item;
+       struct ath10k *ar = ce_state->ar;
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       unsigned int nentries_mask = src_ring->nentries_mask;
+       unsigned int num_items = sendlist->num_items;
+       unsigned int sw_index;
+       unsigned int write_index;
+       int i, delta, ret = -ENOMEM;
+
+       spin_lock_bh(&ar_pci->ce_lock);
+
+       sw_index = src_ring->sw_index;
+       write_index = src_ring->write_index;
+
+       delta = CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
+
+       if (delta >= num_items) {
+               /*
+                * Handle all but the last item uniformly.
+                */
+               for (i = 0; i < num_items - 1; i++) {
+                       item = &sendlist->item[i];
+                       ret = ath10k_ce_send_nolock(ce_state,
+                                                   CE_SENDLIST_ITEM_CTXT,
+                                                   (u32) item->data,
+                                                   item->u.nbytes, transfer_id,
+                                                   item->flags |
+                                                   CE_SEND_FLAG_GATHER);
+                       if (ret)
+                               ath10k_warn("CE send failed for item: %d\n", i);
+               }
+               /*
+                * Provide valid context pointer for final item.
+                */
+               item = &sendlist->item[i];
+               ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
+                                           (u32) item->data, item->u.nbytes,
+                                           transfer_id, item->flags);
+               if (ret)
+                       ath10k_warn("CE send failed for last item: %d\n", i);
+       }
+
+       spin_unlock_bh(&ar_pci->ce_lock);
+
+       return ret;
+}
+
+int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
+                              void *per_recv_context,
+                              u32 buffer)
+{
+       struct ce_ring_state *dest_ring = ce_state->dest_ring;
+       u32 ctrl_addr = ce_state->ctrl_addr;
+       struct ath10k *ar = ce_state->ar;
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       unsigned int nentries_mask = dest_ring->nentries_mask;
+       unsigned int write_index;
+       unsigned int sw_index;
+       int ret;
+
+       spin_lock_bh(&ar_pci->ce_lock);
+       write_index = dest_ring->write_index;
+       sw_index = dest_ring->sw_index;
+
+       ath10k_pci_wake(ar);
+
+       if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
+               struct ce_desc *base = dest_ring->base_addr_owner_space;
+               struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);
+
+               /* Update destination descriptor */
+               desc->addr    = __cpu_to_le32(buffer);
+               desc->nbytes = 0;
+
+               dest_ring->per_transfer_context[write_index] =
+                                                       per_recv_context;
+
+               /* Update Destination Ring Write Index */
+               write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+               ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
+               dest_ring->write_index = write_index;
+               ret = 0;
+       } else {
+               ret = -EIO;
+       }
+       ath10k_pci_sleep(ar);
+       spin_unlock_bh(&ar_pci->ce_lock);
+
+       return ret;
+}
+
+/*
+ * Guts of ath10k_ce_completed_recv_next.
+ * The caller takes responsibility for any necessary locking.
+ */
+static int ath10k_ce_completed_recv_next_nolock(struct ce_state *ce_state,
+                                               void **per_transfer_contextp,
+                                               u32 *bufferp,
+                                               unsigned int *nbytesp,
+                                               unsigned int *transfer_idp,
+                                               unsigned int *flagsp)
+{
+       struct ce_ring_state *dest_ring = ce_state->dest_ring;
+       unsigned int nentries_mask = dest_ring->nentries_mask;
+       unsigned int sw_index = dest_ring->sw_index;
+
+       struct ce_desc *base = dest_ring->base_addr_owner_space;
+       struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
+       struct ce_desc sdesc;
+       u16 nbytes;
+
+       /* Copy in one go for performance reasons */
+       sdesc = *desc;
+
+       nbytes = __le16_to_cpu(sdesc.nbytes);
+       if (nbytes == 0) {
+               /*
+                * This closes a relatively unusual race where the Host
+                * sees the updated DRRI before the update to the
+                * corresponding descriptor has completed. We treat this
+                * as a descriptor that is not yet done.
+                */
+               return -EIO;
+       }
+
+       desc->nbytes = 0;
+
+       /* Return data from completed destination descriptor */
+       *bufferp = __le32_to_cpu(sdesc.addr);
+       *nbytesp = nbytes;
+       *transfer_idp = MS(__le16_to_cpu(sdesc.flags), CE_DESC_FLAGS_META_DATA);
+
+       if (__le16_to_cpu(sdesc.flags) & CE_DESC_FLAGS_BYTE_SWAP)
+               *flagsp = CE_RECV_FLAG_SWAPPED;
+       else
+               *flagsp = 0;
+
+       if (per_transfer_contextp)
+               *per_transfer_contextp =
+                       dest_ring->per_transfer_context[sw_index];
+
+       /* sanity */
+       dest_ring->per_transfer_context[sw_index] = NULL;
+
+       /* Update sw_index */
+       sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+       dest_ring->sw_index = sw_index;
+
+       return 0;
+}
+
+int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
+                                 void **per_transfer_contextp,
+                                 u32 *bufferp,
+                                 unsigned int *nbytesp,
+                                 unsigned int *transfer_idp,
+                                 unsigned int *flagsp)
+{
+       struct ath10k *ar = ce_state->ar;
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int ret;
+
+       spin_lock_bh(&ar_pci->ce_lock);
+       ret = ath10k_ce_completed_recv_next_nolock(ce_state,
+                                                  per_transfer_contextp,
+                                                  bufferp, nbytesp,
+                                                  transfer_idp, flagsp);
+       spin_unlock_bh(&ar_pci->ce_lock);
+
+       return ret;
+}
+
+int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
+                              void **per_transfer_contextp,
+                              u32 *bufferp)
+{
+       struct ce_ring_state *dest_ring;
+       unsigned int nentries_mask;
+       unsigned int sw_index;
+       unsigned int write_index;
+       int ret;
+       struct ath10k *ar;
+       struct ath10k_pci *ar_pci;
+
+       dest_ring = ce_state->dest_ring;
+
+       if (!dest_ring)
+               return -EIO;
+
+       ar = ce_state->ar;
+       ar_pci = ath10k_pci_priv(ar);
+
+       spin_lock_bh(&ar_pci->ce_lock);
+
+       nentries_mask = dest_ring->nentries_mask;
+       sw_index = dest_ring->sw_index;
+       write_index = dest_ring->write_index;
+       if (write_index != sw_index) {
+               struct ce_desc *base = dest_ring->base_addr_owner_space;
+               struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
+
+               /* Return data from completed destination descriptor */
+               *bufferp = __le32_to_cpu(desc->addr);
+
+               if (per_transfer_contextp)
+                       *per_transfer_contextp =
+                               dest_ring->per_transfer_context[sw_index];
+
+               /* sanity */
+               dest_ring->per_transfer_context[sw_index] = NULL;
+
+               /* Update sw_index */
+               sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+               dest_ring->sw_index = sw_index;
+               ret = 0;
+       } else {
+               ret = -EIO;
+       }
+
+       spin_unlock_bh(&ar_pci->ce_lock);
+
+       return ret;
+}
+
+/*
+ * Guts of ath10k_ce_completed_send_next.
+ * The caller takes responsibility for any necessary locking.
+ */
+static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state,
+                                               void **per_transfer_contextp,
+                                               u32 *bufferp,
+                                               unsigned int *nbytesp,
+                                               unsigned int *transfer_idp)
+{
+       struct ce_ring_state *src_ring = ce_state->src_ring;
+       u32 ctrl_addr = ce_state->ctrl_addr;
+       struct ath10k *ar = ce_state->ar;
+       unsigned int nentries_mask = src_ring->nentries_mask;
+       unsigned int sw_index = src_ring->sw_index;
+       unsigned int read_index;
+       int ret = -EIO;
+
+       if (src_ring->hw_index == sw_index) {
+               /*
+                * The SW completion index has caught up with the cached
+                * version of the HW completion index.
+                * Update the cached HW completion index to see whether
+                * the SW has really caught up to the HW, or if the cached
+                * value of the HW index has become stale.
+                */
+               ath10k_pci_wake(ar);
+               src_ring->hw_index =
+                       ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
+               ath10k_pci_sleep(ar);
+       }
+       read_index = src_ring->hw_index;
+
+       if ((read_index != sw_index) && (read_index != 0xffffffff)) {
+               struct ce_desc *sbase = src_ring->shadow_base;
+               struct ce_desc *sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);
+
+               /* Return data from completed source descriptor */
+               *bufferp = __le32_to_cpu(sdesc->addr);
+               *nbytesp = __le16_to_cpu(sdesc->nbytes);
+               *transfer_idp = MS(__le16_to_cpu(sdesc->flags),
+                                               CE_DESC_FLAGS_META_DATA);
+
+               if (per_transfer_contextp)
+                       *per_transfer_contextp =
+                               src_ring->per_transfer_context[sw_index];
+
+               /* sanity */
+               src_ring->per_transfer_context[sw_index] = NULL;
+
+               /* Update sw_index */
+               sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+               src_ring->sw_index = sw_index;
+               ret = 0;
+       }
+
+       return ret;
+}
+
+/* NB: Modeled after ath10k_ce_completed_send_next */
+int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
+                              void **per_transfer_contextp,
+                              u32 *bufferp,
+                              unsigned int *nbytesp,
+                              unsigned int *transfer_idp)
+{
+       struct ce_ring_state *src_ring;
+       unsigned int nentries_mask;
+       unsigned int sw_index;
+       unsigned int write_index;
+       int ret;
+       struct ath10k *ar;
+       struct ath10k_pci *ar_pci;
+
+       src_ring = ce_state->src_ring;
+
+       if (!src_ring)
+               return -EIO;
+
+       ar = ce_state->ar;
+       ar_pci = ath10k_pci_priv(ar);
+
+       spin_lock_bh(&ar_pci->ce_lock);
+
+       nentries_mask = src_ring->nentries_mask;
+       sw_index = src_ring->sw_index;
+       write_index = src_ring->write_index;
+
+       if (write_index != sw_index) {
+               struct ce_desc *base = src_ring->base_addr_owner_space;
+               struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);
+
+               /* Return data from completed source descriptor */
+               *bufferp = __le32_to_cpu(desc->addr);
+               *nbytesp = __le16_to_cpu(desc->nbytes);
+               *transfer_idp = MS(__le16_to_cpu(desc->flags),
+                                               CE_DESC_FLAGS_META_DATA);
+
+               if (per_transfer_contextp)
+                       *per_transfer_contextp =
+                               src_ring->per_transfer_context[sw_index];
+
+               /* sanity */
+               src_ring->per_transfer_context[sw_index] = NULL;
+
+               /* Update sw_index */
+               sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+               src_ring->sw_index = sw_index;
+               ret = 0;
+       } else {
+               ret = -EIO;
+       }
+
+       spin_unlock_bh(&ar_pci->ce_lock);
+
+       return ret;
+}
+
+int ath10k_ce_completed_send_next(struct ce_state *ce_state,
+                                 void **per_transfer_contextp,
+                                 u32 *bufferp,
+                                 unsigned int *nbytesp,
+                                 unsigned int *transfer_idp)
+{
+       struct ath10k *ar = ce_state->ar;
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int ret;
+
+       spin_lock_bh(&ar_pci->ce_lock);
+       ret = ath10k_ce_completed_send_next_nolock(ce_state,
+                                                  per_transfer_contextp,
+                                                  bufferp, nbytesp,
+                                                  transfer_idp);
+       spin_unlock_bh(&ar_pci->ce_lock);
+
+       return ret;
+}
+
+/*
+ * Guts of interrupt handler for per-engine interrupts on a particular CE.
+ *
+ * Invokes registered callbacks for recv_complete,
+ * send_complete, and watermarks.
+ */
+void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id];
+       u32 ctrl_addr = ce_state->ctrl_addr;
+       void *transfer_context;
+       u32 buf;
+       unsigned int nbytes;
+       unsigned int id;
+       unsigned int flags;
+
+       ath10k_pci_wake(ar);
+       spin_lock_bh(&ar_pci->ce_lock);
+
+       /* Clear the copy-complete interrupts that will be handled here. */
+       ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
+                                         HOST_IS_COPY_COMPLETE_MASK);
+
+       if (ce_state->recv_cb) {
+               /*
+                * Pop completed recv buffers and call the registered
+                * recv callback for each
+                */
+               while (ath10k_ce_completed_recv_next_nolock(ce_state,
+                                                           &transfer_context,
+                                                           &buf, &nbytes,
+                                                           &id, &flags) == 0) {
+                       spin_unlock_bh(&ar_pci->ce_lock);
+                       ce_state->recv_cb(ce_state, transfer_context, buf,
+                                         nbytes, id, flags);
+                       spin_lock_bh(&ar_pci->ce_lock);
+               }
+       }
+
+       if (ce_state->send_cb) {
+               /*
+                * Pop completed send buffers and call the registered
+                * send callback for each
+                */
+               while (ath10k_ce_completed_send_next_nolock(ce_state,
+                                                           &transfer_context,
+                                                           &buf,
+                                                           &nbytes,
+                                                           &id) == 0) {
+                       spin_unlock_bh(&ar_pci->ce_lock);
+                       ce_state->send_cb(ce_state, transfer_context,
+                                         buf, nbytes, id);
+                       spin_lock_bh(&ar_pci->ce_lock);
+               }
+       }
+
+       /*
+        * Misc CE interrupts are not being handled, but still need
+        * to be cleared.
+        */
+       ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK);
+
+       spin_unlock_bh(&ar_pci->ce_lock);
+       ath10k_pci_sleep(ar);
+}
+
+/*
+ * Handler for per-engine interrupts on ALL active CEs.
+ * This is used in cases where the system is sharing a
+ * single interrupt for all CEs.
+ */
+
+void ath10k_ce_per_engine_service_any(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int ce_id;
+       u32 intr_summary;
+
+       ath10k_pci_wake(ar);
+       intr_summary = CE_INTERRUPT_SUMMARY(ar);
+
+       for (ce_id = 0; intr_summary && (ce_id < ar_pci->ce_count); ce_id++) {
+               if (intr_summary & (1 << ce_id))
+                       intr_summary &= ~(1 << ce_id);
+               else
+                       /* no intr pending on this CE */
+                       continue;
+
+               ath10k_ce_per_engine_service(ar, ce_id);
+       }
+
+       ath10k_pci_sleep(ar);
+}
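As a concrete reading of the loop above: if CE_INTERRUPT_SUMMARY(ar) returns 0x5 (binary 101), only CE 0 and CE 2 have a pending copy-complete interrupt, so ath10k_ce_per_engine_service() runs for those two engines and all others are skipped.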
+
+/*
+ * Adjust interrupts for the copy complete handler.
+ * If it's needed for either send or recv, then unmask
+ * this interrupt; otherwise, mask it.
+ *
+ * Called with ce_lock held.
+ */
+static void ath10k_ce_per_engine_handler_adjust(struct ce_state *ce_state,
+                                               int disable_copy_compl_intr)
+{
+       u32 ctrl_addr = ce_state->ctrl_addr;
+       struct ath10k *ar = ce_state->ar;
+
+       ath10k_pci_wake(ar);
+
+       if ((!disable_copy_compl_intr) &&
+           (ce_state->send_cb || ce_state->recv_cb))
+               ath10k_ce_copy_complete_inter_enable(ar, ctrl_addr);
+       else
+               ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
+
+       ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
+
+       ath10k_pci_sleep(ar);
+}
+
+void ath10k_ce_disable_interrupts(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int ce_id;
+
+       ath10k_pci_wake(ar);
+       for (ce_id = 0; ce_id < ar_pci->ce_count; ce_id++) {
+               struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id];
+               u32 ctrl_addr = ce_state->ctrl_addr;
+
+               ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
+       }
+       ath10k_pci_sleep(ar);
+}
+
+void ath10k_ce_send_cb_register(struct ce_state *ce_state,
+                               void (*send_cb) (struct ce_state *ce_state,
+                                                void *transfer_context,
+                                                u32 buffer,
+                                                unsigned int nbytes,
+                                                unsigned int transfer_id),
+                               int disable_interrupts)
+{
+       struct ath10k *ar = ce_state->ar;
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+       spin_lock_bh(&ar_pci->ce_lock);
+       ce_state->send_cb = send_cb;
+       ath10k_ce_per_engine_handler_adjust(ce_state, disable_interrupts);
+       spin_unlock_bh(&ar_pci->ce_lock);
+}
+
+void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
+                               void (*recv_cb) (struct ce_state *ce_state,
+                                                void *transfer_context,
+                                                u32 buffer,
+                                                unsigned int nbytes,
+                                                unsigned int transfer_id,
+                                                unsigned int flags))
+{
+       struct ath10k *ar = ce_state->ar;
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+       spin_lock_bh(&ar_pci->ce_lock);
+       ce_state->recv_cb = recv_cb;
+       ath10k_ce_per_engine_handler_adjust(ce_state, 0);
+       spin_unlock_bh(&ar_pci->ce_lock);
+}
+
+static int ath10k_ce_init_src_ring(struct ath10k *ar,
+                                  unsigned int ce_id,
+                                  struct ce_state *ce_state,
+                                  const struct ce_attr *attr)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct ce_ring_state *src_ring;
+       unsigned int nentries = attr->src_nentries;
+       unsigned int ce_nbytes;
+       u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+       dma_addr_t base_addr;
+       char *ptr;
+
+       nentries = roundup_pow_of_two(nentries);
+
+       if (ce_state->src_ring) {
+               WARN_ON(ce_state->src_ring->nentries != nentries);
+               return 0;
+       }
+
+       ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *));
+       ptr = kzalloc(ce_nbytes, GFP_KERNEL);
+       if (ptr == NULL)
+               return -ENOMEM;
+
+       ce_state->src_ring = (struct ce_ring_state *)ptr;
+       src_ring = ce_state->src_ring;
+
+       ptr += sizeof(struct ce_ring_state);
+       src_ring->nentries = nentries;
+       src_ring->nentries_mask = nentries - 1;
+
+       ath10k_pci_wake(ar);
+       src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
+       src_ring->hw_index = src_ring->sw_index;
+
+       src_ring->write_index =
+               ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
+       ath10k_pci_sleep(ar);
+
+       src_ring->per_transfer_context = (void **)ptr;
+
+       /*
+        * Legacy platforms that do not support cache
+        * coherent DMA are unsupported
+        */
+       src_ring->base_addr_owner_space_unaligned =
+               pci_alloc_consistent(ar_pci->pdev,
+                                    (nentries * sizeof(struct ce_desc) +
+                                     CE_DESC_RING_ALIGN),
+                                    &base_addr);
+       src_ring->base_addr_ce_space_unaligned = base_addr;
+
+       src_ring->base_addr_owner_space = PTR_ALIGN(
+                       src_ring->base_addr_owner_space_unaligned,
+                       CE_DESC_RING_ALIGN);
+       src_ring->base_addr_ce_space = ALIGN(
+                       src_ring->base_addr_ce_space_unaligned,
+                       CE_DESC_RING_ALIGN);
+
+       /*
+        * Also allocate a shadow src ring in regular
+        * mem to use for faster access.
+        */
+       src_ring->shadow_base_unaligned =
+               kmalloc((nentries * sizeof(struct ce_desc) +
+                        CE_DESC_RING_ALIGN), GFP_KERNEL);
+
+       src_ring->shadow_base = PTR_ALIGN(
+                       src_ring->shadow_base_unaligned,
+                       CE_DESC_RING_ALIGN);
+
+       ath10k_pci_wake(ar);
+       ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
+                                        src_ring->base_addr_ce_space);
+       ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
+       ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
+       ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
+       ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
+       ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
+       ath10k_pci_sleep(ar);
+
+       return 0;
+}
+
+static int ath10k_ce_init_dest_ring(struct ath10k *ar,
+                                   unsigned int ce_id,
+                                   struct ce_state *ce_state,
+                                   const struct ce_attr *attr)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct ce_ring_state *dest_ring;
+       unsigned int nentries = attr->dest_nentries;
+       unsigned int ce_nbytes;
+       u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+       dma_addr_t base_addr;
+       char *ptr;
+
+       nentries = roundup_pow_of_two(nentries);
+
+       if (ce_state->dest_ring) {
+               WARN_ON(ce_state->dest_ring->nentries != nentries);
+               return 0;
+       }
+
+       ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *));
+       ptr = kzalloc(ce_nbytes, GFP_KERNEL);
+       if (ptr == NULL)
+               return -ENOMEM;
+
+       ce_state->dest_ring = (struct ce_ring_state *)ptr;
+       dest_ring = ce_state->dest_ring;
+
+       ptr += sizeof(struct ce_ring_state);
+       dest_ring->nentries = nentries;
+       dest_ring->nentries_mask = nentries - 1;
+
+       ath10k_pci_wake(ar);
+       dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
+       dest_ring->write_index =
+               ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
+       ath10k_pci_sleep(ar);
+
+       dest_ring->per_transfer_context = (void **)ptr;
+
+       /*
+        * Legacy platforms that do not support cache
+        * coherent DMA are unsupported
+        */
+       dest_ring->base_addr_owner_space_unaligned =
+               pci_alloc_consistent(ar_pci->pdev,
+                                    (nentries * sizeof(struct ce_desc) +
+                                     CE_DESC_RING_ALIGN),
+                                    &base_addr);
+       dest_ring->base_addr_ce_space_unaligned = base_addr;
+
+       /*
+        * Zero-initialize the descriptor memory to prevent garbage
+        * data from crashing the system during firmware download.
+        */
+       memset(dest_ring->base_addr_owner_space_unaligned, 0,
+              nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN);
+
+       dest_ring->base_addr_owner_space = PTR_ALIGN(
+                       dest_ring->base_addr_owner_space_unaligned,
+                       CE_DESC_RING_ALIGN);
+       dest_ring->base_addr_ce_space = ALIGN(
+                       dest_ring->base_addr_ce_space_unaligned,
+                       CE_DESC_RING_ALIGN);
+
+       ath10k_pci_wake(ar);
+       ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
+                                         dest_ring->base_addr_ce_space);
+       ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
+       ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
+       ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
+       ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
+       ath10k_pci_sleep(ar);
+
+       return 0;
+}
+
+static struct ce_state *ath10k_ce_init_state(struct ath10k *ar,
+                                            unsigned int ce_id,
+                                            const struct ce_attr *attr)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct ce_state *ce_state = NULL;
+       u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+
+       spin_lock_bh(&ar_pci->ce_lock);
+
+       if (!ar_pci->ce_id_to_state[ce_id]) {
+               ce_state = kzalloc(sizeof(*ce_state), GFP_ATOMIC);
+               if (ce_state == NULL) {
+                       spin_unlock_bh(&ar_pci->ce_lock);
+                       return NULL;
+               }
+
+               ar_pci->ce_id_to_state[ce_id] = ce_state;
+               ce_state->ar = ar;
+               ce_state->id = ce_id;
+               ce_state->ctrl_addr = ctrl_addr;
+               ce_state->state = CE_RUNNING;
+               /* Save attribute flags */
+               ce_state->attr_flags = attr->flags;
+               ce_state->src_sz_max = attr->src_sz_max;
+       }
+
+       spin_unlock_bh(&ar_pci->ce_lock);
+
+       return ce_state;
+}
+
+/*
+ * Initialize a Copy Engine based on caller-supplied attributes.
+ * This may be called once to initialize both source and destination
+ * rings or it may be called twice for separate source and destination
+ * initialization. It may be that only one side or the other is
+ * initialized by software/firmware.
+ */
+struct ce_state *ath10k_ce_init(struct ath10k *ar,
+                               unsigned int ce_id,
+                               const struct ce_attr *attr)
+{
+       struct ce_state *ce_state;
+       u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+
+       ce_state = ath10k_ce_init_state(ar, ce_id, attr);
+       if (!ce_state) {
+               ath10k_err("Failed to initialize CE state for ID: %d\n", ce_id);
+               return NULL;
+       }
+
+       if (attr->src_nentries) {
+               if (ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr)) {
+                       ath10k_err("Failed to initialize CE src ring for ID: %d\n",
+                                  ce_id);
+                       ath10k_ce_deinit(ce_state);
+                       return NULL;
+               }
+       }
+
+       if (attr->dest_nentries) {
+               if (ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr)) {
+                       ath10k_err("Failed to initialize CE dest ring for ID: %d\n",
+                                  ce_id);
+                       ath10k_ce_deinit(ce_state);
+                       return NULL;
+               }
+       }
+
+       /* Enable CE error interrupts */
+       ath10k_pci_wake(ar);
+       ath10k_ce_error_intr_enable(ar, ctrl_addr);
+       ath10k_pci_sleep(ar);
+
+       return ce_state;
+}
+
+void ath10k_ce_deinit(struct ce_state *ce_state)
+{
+       unsigned int ce_id = ce_state->id;
+       struct ath10k *ar = ce_state->ar;
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+       ce_state->state = CE_UNUSED;
+       ar_pci->ce_id_to_state[ce_id] = NULL;
+
+       if (ce_state->src_ring) {
+               kfree(ce_state->src_ring->shadow_base_unaligned);
+               pci_free_consistent(ar_pci->pdev,
+                                   (ce_state->src_ring->nentries *
+                                    sizeof(struct ce_desc) +
+                                    CE_DESC_RING_ALIGN),
+                                   ce_state->src_ring->base_addr_owner_space,
+                                   ce_state->src_ring->base_addr_ce_space);
+               kfree(ce_state->src_ring);
+       }
+
+       if (ce_state->dest_ring) {
+               pci_free_consistent(ar_pci->pdev,
+                                   (ce_state->dest_ring->nentries *
+                                    sizeof(struct ce_desc) +
+                                    CE_DESC_RING_ALIGN),
+                                   ce_state->dest_ring->base_addr_owner_space,
+                                   ce_state->dest_ring->base_addr_ce_space);
+               kfree(ce_state->dest_ring);
+       }
+       kfree(ce_state);
+}
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
new file mode 100644 (file)
index 0000000..c17f07c
--- /dev/null
@@ -0,0 +1,516 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _CE_H_
+#define _CE_H_
+
+#include "hif.h"
+
+
+/* Maximum number of Copy Engines supported */
+#define CE_COUNT_MAX 8
+#define CE_HTT_H2T_MSG_SRC_NENTRIES 2048
+
+/* Descriptor rings must be aligned to this boundary */
+#define CE_DESC_RING_ALIGN     8
+#define CE_SENDLIST_ITEMS_MAX  12
+#define CE_SEND_FLAG_GATHER    0x00010000
+
+/*
+ * Copy Engine support: low-level Target-side Copy Engine API.
+ * This is a hardware access layer used by code that understands
+ * how to use copy engines.
+ */
+
+struct ce_state;
+
+
+/* Copy Engine operational state */
+enum ce_op_state {
+       CE_UNUSED,
+       CE_PAUSED,
+       CE_RUNNING,
+};
+
+#define CE_DESC_FLAGS_GATHER         (1 << 0)
+#define CE_DESC_FLAGS_BYTE_SWAP      (1 << 1)
+#define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC
+#define CE_DESC_FLAGS_META_DATA_LSB  3
+
+struct ce_desc {
+       __le32 addr;
+       __le16 nbytes;
+       __le16 flags; /* %CE_DESC_FLAGS_ */
+};
+
+/* Copy Engine Ring internal state */
+struct ce_ring_state {
+       /* Number of entries in this ring; must be power of 2 */
+       unsigned int nentries;
+       unsigned int nentries_mask;
+
+       /*
+        * For dest ring, this is the next index to be processed
+        * by software after it was/is received into.
+        *
+        * For src ring, this is the last descriptor that was sent
+        * and completion processed by software.
+        *
+        * Regardless of src or dest ring, this is an invariant
+        * (modulo ring size):
+        *     write index >= read index >= sw_index
+        */
+       unsigned int sw_index;
+       /* cached copy */
+       unsigned int write_index;
+       /*
+        * For src ring, this is the next index not yet processed by HW.
+        * This is a cached copy of the real HW index (read index), used
+        * for avoiding reading the HW index register more often than
+        * necessary.
+        * This extends the invariant:
+        *     write index >= read index >= hw_index >= sw_index
+        *
+        * For dest ring, this is currently unused.
+        */
+       /* cached copy */
+       unsigned int hw_index;
+
+       /* Start of DMA-coherent area reserved for descriptors */
+       /* Host address space */
+       void *base_addr_owner_space_unaligned;
+       /* CE address space */
+       u32 base_addr_ce_space_unaligned;
+
+       /*
+        * Actual start of descriptors.
+        * Aligned to descriptor-size boundary.
+        * Points into reserved DMA-coherent area, above.
+        */
+       /* Host address space */
+       void *base_addr_owner_space;
+
+       /* CE address space */
+       u32 base_addr_ce_space;
+       /*
+        * Start of shadow copy of descriptors, within regular memory.
+        * Aligned to descriptor-size boundary.
+        */
+       void *shadow_base_unaligned;
+       struct ce_desc *shadow_base;
+
+       void **per_transfer_context;
+};
+
+/* Copy Engine internal state */
+struct ce_state {
+       struct ath10k *ar;
+       unsigned int id;
+
+       unsigned int attr_flags;
+
+       u32 ctrl_addr;
+       enum ce_op_state state;
+
+       void (*send_cb) (struct ce_state *ce_state,
+                        void *per_transfer_send_context,
+                        u32 buffer,
+                        unsigned int nbytes,
+                        unsigned int transfer_id);
+       void (*recv_cb) (struct ce_state *ce_state,
+                        void *per_transfer_recv_context,
+                        u32 buffer,
+                        unsigned int nbytes,
+                        unsigned int transfer_id,
+                        unsigned int flags);
+
+       unsigned int src_sz_max;
+       struct ce_ring_state *src_ring;
+       struct ce_ring_state *dest_ring;
+};
+
+struct ce_sendlist_item {
+       /* e.g. buffer or desc list */
+       dma_addr_t data;
+       union {
+               /* simple buffer */
+               unsigned int nbytes;
+               /* Rx descriptor list */
+               unsigned int ndesc;
+       } u;
+       /* externally-specified flags; OR-ed with internal flags */
+       u32 flags;
+};
+
+struct ce_sendlist {
+       unsigned int num_items;
+       struct ce_sendlist_item item[CE_SENDLIST_ITEMS_MAX];
+};
+
+/* Copy Engine settable attributes */
+struct ce_attr;
+
+/*==================Send====================*/
+
+/* ath10k_ce_send flags */
+#define CE_SEND_FLAG_BYTE_SWAP 1
+
+/*
+ * Queue a source buffer to be sent to an anonymous destination buffer.
+ *   ce         - which copy engine to use
+ *   buffer          - address of buffer
+ *   nbytes          - number of bytes to send
+ *   transfer_id     - arbitrary ID; reflected to destination
+ *   flags           - CE_SEND_FLAG_* values
+ * Returns 0 on success; otherwise an error status.
+ *
+ * Note: If no flags are specified, use CE's default data swap mode.
+ *
+ * Implementation note: pushes 1 buffer to Source ring
+ */
+int ath10k_ce_send(struct ce_state *ce_state,
+                  void *per_transfer_send_context,
+                  u32 buffer,
+                  unsigned int nbytes,
+                  /* 14 bits */
+                  unsigned int transfer_id,
+                  unsigned int flags);
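A minimal usage sketch of the call above (illustrative only; paddr, len, xfer_id and the skb context are hypothetical, and paddr must already be a CE-visible DMA address):

	int ret;

	ret = ath10k_ce_send(ce_state, skb, paddr, len, xfer_id, 0);
	if (ret)
		/* non-zero means the send could not be queued (e.g. ring full) */
		dev_kfree_skb_any(skb);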
+
+void ath10k_ce_send_cb_register(struct ce_state *ce_state,
+                               void (*send_cb) (struct ce_state *ce_state,
+                                                void *transfer_context,
+                                                u32 buffer,
+                                                unsigned int nbytes,
+                                                unsigned int transfer_id),
+                               int disable_interrupts);
+
+/* Append a simple buffer (address/length) to a sendlist. */
+void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist,
+                               u32 buffer,
+                               unsigned int nbytes,
+                               /* OR-ed with internal flags */
+                               u32 flags);
+
+/*
+ * Queue a "sendlist" of buffers to be sent using gather to a single
+ * anonymous destination buffer
+ *   ce         - which copy engine to use
+ *   sendlist        - list of simple buffers to send using gather
+ *   transfer_id     - arbitrary ID; reflected to destination
+ * Returns 0 on success; otherwise an error status.
+ *
+ * Implementation note: Pushes multiple buffers with Gather to Source ring.
+ */
+int ath10k_ce_sendlist_send(struct ce_state *ce_state,
+                           void *per_transfer_send_context,
+                           struct ce_sendlist *sendlist,
+                           /* 14 bits */
+                           unsigned int transfer_id);
+
+/*==================Recv=======================*/
+
+/*
+ * Make a buffer available to receive. The buffer must be at least the
+ * minimum size appropriate for this copy engine (src_sz_max attribute).
+ *   ce                    - which copy engine to use
+ *   per_transfer_recv_context  - context passed back to caller's recv_cb
+ *   buffer                     - address of buffer in CE space
+ * Returns 0 on success; otherwise an error status.
+ *
+ * Implementation note: Pushes a buffer to Dest ring.
+ */
+int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
+                              void *per_transfer_recv_context,
+                              u32 buffer);
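A sketch of replenishing the destination ring with this call (skb and paddr are placeholders; each posted buffer must be at least src_sz_max bytes and DMA-mapped):

	int ret;

	ret = ath10k_ce_recv_buf_enqueue(ce_state, skb, paddr);
	if (ret)
		/* -EIO: destination ring is already full; release the buffer */
		dev_kfree_skb_any(skb);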
+
+void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
+                               void (*recv_cb) (struct ce_state *ce_state,
+                                                void *transfer_context,
+                                                u32 buffer,
+                                                unsigned int nbytes,
+                                                unsigned int transfer_id,
+                                                unsigned int flags));
+
+/* recv flags */
+/* Data is byte-swapped */
+#define CE_RECV_FLAG_SWAPPED   1
+
+/*
+ * Supply data for the next completed unprocessed receive descriptor.
+ * Pops buffer from Dest ring.
+ */
+int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
+                                 void **per_transfer_contextp,
+                                 u32 *bufferp,
+                                 unsigned int *nbytesp,
+                                 unsigned int *transfer_idp,
+                                 unsigned int *flagsp);
+/*
+ * Supply data for the next completed unprocessed send descriptor.
+ * Pops 1 completed send buffer from Source ring.
+ */
+int ath10k_ce_completed_send_next(struct ce_state *ce_state,
+                          void **per_transfer_contextp,
+                          u32 *bufferp,
+                          unsigned int *nbytesp,
+                          unsigned int *transfer_idp);
+
+/*==================CE Engine Initialization=======================*/
+
+/* Initialize an instance of a CE */
+struct ce_state *ath10k_ce_init(struct ath10k *ar,
+                               unsigned int ce_id,
+                               const struct ce_attr *attr);
+
+/*==================CE Engine Shutdown=======================*/
+/*
+ * Support clean shutdown by allowing the caller to revoke
+ * receive buffers.  Target DMA must be stopped before using
+ * this API.
+ */
+int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
+                              void **per_transfer_contextp,
+                              u32 *bufferp);
+
+/*
+ * Support clean shutdown by allowing the caller to cancel
+ * pending sends.  Target DMA must be stopped before using
+ * this API.
+ */
+int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
+                              void **per_transfer_contextp,
+                              u32 *bufferp,
+                              unsigned int *nbytesp,
+                              unsigned int *transfer_idp);
+
+void ath10k_ce_deinit(struct ce_state *ce_state);
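Putting the three shutdown helpers above together, a teardown sketch (illustrative only; ctx, buf, nbytes and id are local placeholders, and target DMA is assumed to be stopped already):

	void *ctx;
	u32 buf;
	unsigned int nbytes, id;

	while (ath10k_ce_revoke_recv_next(ce_state, &ctx, &buf) == 0)
		;	/* unmap/free the rx buffer described by ctx/buf */

	while (ath10k_ce_cancel_send_next(ce_state, &ctx, &buf,
					  &nbytes, &id) == 0)
		;	/* unmap/free the pending tx buffer */

	ath10k_ce_deinit(ce_state);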
+
+/*==================CE Interrupt Handlers====================*/
+void ath10k_ce_per_engine_service_any(struct ath10k *ar);
+void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
+void ath10k_ce_disable_interrupts(struct ath10k *ar);
+
+/* ce_attr.flags values */
+/* Use NonSnooping PCIe accesses? */
+#define CE_ATTR_NO_SNOOP               1
+
+/* Byte swap data words */
+#define CE_ATTR_BYTE_SWAP_DATA         2
+
+/* Swizzle descriptors? */
+#define CE_ATTR_SWIZZLE_DESCRIPTORS    4
+
+/* no interrupt on copy completion */
+#define CE_ATTR_DIS_INTR               8
+
+/* Attributes of an instance of a Copy Engine */
+struct ce_attr {
+       /* CE_ATTR_* values */
+       unsigned int flags;
+
+       /* currently not in use */
+       unsigned int priority;
+
+       /* #entries in source ring - Must be a power of 2 */
+       unsigned int src_nentries;
+
+       /*
+        * Max source send size for this CE.
+        * This is also the minimum size of a destination buffer.
+        */
+       unsigned int src_sz_max;
+
+       /* #entries in destination ring - Must be a power of 2 */
+       unsigned int dest_nentries;
+
+       /* Future use */
+       void *reserved;
+};
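Purely as an illustration of the structure above (the values are invented, not taken from this patch), a send-only CE could be described as:

	static const struct ce_attr example_host_ce_attr = {
		.flags = 0,		/* no special attributes */
		.src_nentries = 16,	/* must be a power of 2 */
		.src_sz_max = 2048,
		.dest_nentries = 0,	/* no destination ring: send-only */
	};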
+
+/*
+ * When using sendlist_send to transfer multiple buffer fragments, the
+ * transfer context of every fragment except the last is set to
+ * CE_SENDLIST_ITEM_CTXT. ce_completed_send reports success for each
+ * fragment as it completes and returns CE_SENDLIST_ITEM_CTXT as its
+ * transfer context, so the upper layer can use this value to tell
+ * intermediate fragments from the final one in a send completion.
+ */
+#define CE_SENDLIST_ITEM_CTXT  ((void *)0xcecebeef)
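For example, a send-completion callback registered via ath10k_ce_send_cb_register() could filter the sentinel like this (sketch only):

	static void example_send_cb(struct ce_state *ce_state, void *ctx,
				    u32 buffer, unsigned int nbytes,
				    unsigned int transfer_id)
	{
		if (ctx == CE_SENDLIST_ITEM_CTXT)
			return;	/* intermediate gather fragment, nothing to do */

		/* ctx is the caller's per-transfer context for the final item */
	}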
+
+#define SR_BA_ADDRESS          0x0000
+#define SR_SIZE_ADDRESS                0x0004
+#define DR_BA_ADDRESS          0x0008
+#define DR_SIZE_ADDRESS                0x000c
+#define CE_CMD_ADDRESS         0x0018
+
+#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MSB     17
+#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB     17
+#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK    0x00020000
+#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(x) \
+       (((0 | (x)) << CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB) & \
+       CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK)
+
+#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MSB     16
+#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB     16
+#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK    0x00010000
+#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_GET(x) \
+       (((x) & CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) >> \
+        CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB)
+#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(x) \
+       (((0 | (x)) << CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB) & \
+        CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK)
+
+#define CE_CTRL1_DMAX_LENGTH_MSB               15
+#define CE_CTRL1_DMAX_LENGTH_LSB               0
+#define CE_CTRL1_DMAX_LENGTH_MASK              0x0000ffff
+#define CE_CTRL1_DMAX_LENGTH_GET(x) \
+       (((x) & CE_CTRL1_DMAX_LENGTH_MASK) >> CE_CTRL1_DMAX_LENGTH_LSB)
+#define CE_CTRL1_DMAX_LENGTH_SET(x) \
+       (((0 | (x)) << CE_CTRL1_DMAX_LENGTH_LSB) & CE_CTRL1_DMAX_LENGTH_MASK)
+
+#define CE_CTRL1_ADDRESS                       0x0010
+#define CE_CTRL1_HW_MASK                       0x0007ffff
+#define CE_CTRL1_SW_MASK                       0x0007ffff
+#define CE_CTRL1_HW_WRITE_MASK                 0x00000000
+#define CE_CTRL1_SW_WRITE_MASK                 0x0007ffff
+#define CE_CTRL1_RSTMASK                       0xffffffff
+#define CE_CTRL1_RESET                         0x00000080
+
+#define CE_CMD_HALT_STATUS_MSB                 3
+#define CE_CMD_HALT_STATUS_LSB                 3
+#define CE_CMD_HALT_STATUS_MASK                        0x00000008
+#define CE_CMD_HALT_STATUS_GET(x) \
+       (((x) & CE_CMD_HALT_STATUS_MASK) >> CE_CMD_HALT_STATUS_LSB)
+#define CE_CMD_HALT_STATUS_SET(x) \
+       (((0 | (x)) << CE_CMD_HALT_STATUS_LSB) & CE_CMD_HALT_STATUS_MASK)
+#define CE_CMD_HALT_STATUS_RESET               0
+#define CE_CMD_HALT_MSB                                0
+#define CE_CMD_HALT_MASK                       0x00000001
+
+#define HOST_IE_COPY_COMPLETE_MSB              0
+#define HOST_IE_COPY_COMPLETE_LSB              0
+#define HOST_IE_COPY_COMPLETE_MASK             0x00000001
+#define HOST_IE_COPY_COMPLETE_GET(x) \
+       (((x) & HOST_IE_COPY_COMPLETE_MASK) >> HOST_IE_COPY_COMPLETE_LSB)
+#define HOST_IE_COPY_COMPLETE_SET(x) \
+       (((0 | (x)) << HOST_IE_COPY_COMPLETE_LSB) & HOST_IE_COPY_COMPLETE_MASK)
+#define HOST_IE_COPY_COMPLETE_RESET            0
+#define HOST_IE_ADDRESS                                0x002c
+
+#define HOST_IS_DST_RING_LOW_WATERMARK_MASK    0x00000010
+#define HOST_IS_DST_RING_HIGH_WATERMARK_MASK   0x00000008
+#define HOST_IS_SRC_RING_LOW_WATERMARK_MASK    0x00000004
+#define HOST_IS_SRC_RING_HIGH_WATERMARK_MASK   0x00000002
+#define HOST_IS_COPY_COMPLETE_MASK             0x00000001
+#define HOST_IS_ADDRESS                                0x0030
+
+#define MISC_IE_ADDRESS                                0x0034
+
+#define MISC_IS_AXI_ERR_MASK                   0x00000400
+
+#define MISC_IS_DST_ADDR_ERR_MASK              0x00000200
+#define MISC_IS_SRC_LEN_ERR_MASK               0x00000100
+#define MISC_IS_DST_MAX_LEN_VIO_MASK           0x00000080
+#define MISC_IS_DST_RING_OVERFLOW_MASK         0x00000040
+#define MISC_IS_SRC_RING_OVERFLOW_MASK         0x00000020
+
+#define MISC_IS_ADDRESS                                0x0038
+
+#define SR_WR_INDEX_ADDRESS                    0x003c
+
+#define DST_WR_INDEX_ADDRESS                   0x0040
+
+#define CURRENT_SRRI_ADDRESS                   0x0044
+
+#define CURRENT_DRRI_ADDRESS                   0x0048
+
+#define SRC_WATERMARK_LOW_MSB                  31
+#define SRC_WATERMARK_LOW_LSB                  16
+#define SRC_WATERMARK_LOW_MASK                 0xffff0000
+#define SRC_WATERMARK_LOW_GET(x) \
+       (((x) & SRC_WATERMARK_LOW_MASK) >> SRC_WATERMARK_LOW_LSB)
+#define SRC_WATERMARK_LOW_SET(x) \
+       (((0 | (x)) << SRC_WATERMARK_LOW_LSB) & SRC_WATERMARK_LOW_MASK)
+#define SRC_WATERMARK_LOW_RESET                        0
+#define SRC_WATERMARK_HIGH_MSB                 15
+#define SRC_WATERMARK_HIGH_LSB                 0
+#define SRC_WATERMARK_HIGH_MASK                        0x0000ffff
+#define SRC_WATERMARK_HIGH_GET(x) \
+       (((x) & SRC_WATERMARK_HIGH_MASK) >> SRC_WATERMARK_HIGH_LSB)
+#define SRC_WATERMARK_HIGH_SET(x) \
+       (((0 | (x)) << SRC_WATERMARK_HIGH_LSB) & SRC_WATERMARK_HIGH_MASK)
+#define SRC_WATERMARK_HIGH_RESET               0
+#define SRC_WATERMARK_ADDRESS                  0x004c
+
+#define DST_WATERMARK_LOW_LSB                  16
+#define DST_WATERMARK_LOW_MASK                 0xffff0000
+#define DST_WATERMARK_LOW_SET(x) \
+       (((0 | (x)) << DST_WATERMARK_LOW_LSB) & DST_WATERMARK_LOW_MASK)
+#define DST_WATERMARK_LOW_RESET                        0
+#define DST_WATERMARK_HIGH_MSB                 15
+#define DST_WATERMARK_HIGH_LSB                 0
+#define DST_WATERMARK_HIGH_MASK                        0x0000ffff
+#define DST_WATERMARK_HIGH_GET(x) \
+       (((x) & DST_WATERMARK_HIGH_MASK) >> DST_WATERMARK_HIGH_LSB)
+#define DST_WATERMARK_HIGH_SET(x) \
+       (((0 | (x)) << DST_WATERMARK_HIGH_LSB) & DST_WATERMARK_HIGH_MASK)
+#define DST_WATERMARK_HIGH_RESET               0
+#define DST_WATERMARK_ADDRESS                  0x0050
+
+
+static inline u32 ath10k_ce_base_address(unsigned int ce_id)
+{
+       return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
+}
+
+#define CE_WATERMARK_MASK (HOST_IS_SRC_RING_LOW_WATERMARK_MASK  | \
+                          HOST_IS_SRC_RING_HIGH_WATERMARK_MASK | \
+                          HOST_IS_DST_RING_LOW_WATERMARK_MASK  | \
+                          HOST_IS_DST_RING_HIGH_WATERMARK_MASK)
+
+#define CE_ERROR_MASK  (MISC_IS_AXI_ERR_MASK           | \
+                        MISC_IS_DST_ADDR_ERR_MASK      | \
+                        MISC_IS_SRC_LEN_ERR_MASK       | \
+                        MISC_IS_DST_MAX_LEN_VIO_MASK   | \
+                        MISC_IS_DST_RING_OVERFLOW_MASK | \
+                        MISC_IS_SRC_RING_OVERFLOW_MASK)
+
+#define CE_SRC_RING_TO_DESC(baddr, idx) \
+       (&(((struct ce_desc *)baddr)[idx]))
+
+#define CE_DEST_RING_TO_DESC(baddr, idx) \
+       (&(((struct ce_desc *)baddr)[idx]))
+
+/* Ring arithmetic (modulus number of entries in ring, which is a pwr of 2). */
+#define CE_RING_DELTA(nentries_mask, fromidx, toidx) \
+       (((int)(toidx)-(int)(fromidx)) & (nentries_mask))
+
+#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
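A worked example of the two macros above: with 8 entries (nentries_mask 7), write_index 2 and sw_index 6, CE_RING_DELTA(7, 2, 6 - 1) = (5 - 2) & 7 = 3, i.e. three descriptors can still be queued before the ring is full, and CE_RING_IDX_INCR(7, 7) = (7 + 1) & 7 = 0, wrapping the index back to the start of the ring.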
+
+#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB              8
+#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK             0x0000ff00
+#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(x) \
+       (((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \
+               CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB)
+#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS                   0x0000
+
+#define CE_INTERRUPT_SUMMARY(ar) \
+       CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET( \
+               ath10k_pci_read32((ar), CE_WRAPPER_BASE_ADDRESS + \
+               CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS))
+
+#endif /* _CE_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
new file mode 100644 (file)
index 0000000..2b3426b
--- /dev/null
@@ -0,0 +1,665 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/firmware.h>
+
+#include "core.h"
+#include "mac.h"
+#include "htc.h"
+#include "hif.h"
+#include "wmi.h"
+#include "bmi.h"
+#include "debug.h"
+#include "htt.h"
+
+unsigned int ath10k_debug_mask;
+static bool uart_print;
+static unsigned int ath10k_p2p;
+module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
+module_param(uart_print, bool, 0644);
+module_param_named(p2p, ath10k_p2p, uint, 0644);
+MODULE_PARM_DESC(debug_mask, "Debugging mask");
+MODULE_PARM_DESC(uart_print, "Uart target debugging");
+MODULE_PARM_DESC(p2p, "Enable ath10k P2P support");
+
+static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+       {
+               .id = QCA988X_HW_1_0_VERSION,
+               .name = "qca988x hw1.0",
+               .patch_load_addr = QCA988X_HW_1_0_PATCH_LOAD_ADDR,
+               .fw = {
+                       .dir = QCA988X_HW_1_0_FW_DIR,
+                       .fw = QCA988X_HW_1_0_FW_FILE,
+                       .otp = QCA988X_HW_1_0_OTP_FILE,
+                       .board = QCA988X_HW_1_0_BOARD_DATA_FILE,
+               },
+       },
+       {
+               .id = QCA988X_HW_2_0_VERSION,
+               .name = "qca988x hw2.0",
+               .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
+               .fw = {
+                       .dir = QCA988X_HW_2_0_FW_DIR,
+                       .fw = QCA988X_HW_2_0_FW_FILE,
+                       .otp = QCA988X_HW_2_0_OTP_FILE,
+                       .board = QCA988X_HW_2_0_BOARD_DATA_FILE,
+               },
+       },
+};
+
+static void ath10k_send_suspend_complete(struct ath10k *ar)
+{
+       ath10k_dbg(ATH10K_DBG_CORE, "%s\n", __func__);
+
+       ar->is_target_paused = true;
+       wake_up(&ar->event_queue);
+}
+
+static int ath10k_check_fw_version(struct ath10k *ar)
+{
+       char version[32];
+
+       if (ar->fw_version_major >= SUPPORTED_FW_MAJOR &&
+           ar->fw_version_minor >= SUPPORTED_FW_MINOR &&
+           ar->fw_version_release >= SUPPORTED_FW_RELEASE &&
+           ar->fw_version_build >= SUPPORTED_FW_BUILD)
+               return 0;
+
+       snprintf(version, sizeof(version), "%u.%u.%u.%u",
+                SUPPORTED_FW_MAJOR, SUPPORTED_FW_MINOR,
+                SUPPORTED_FW_RELEASE, SUPPORTED_FW_BUILD);
+
+       ath10k_warn("WARNING: Firmware version %s is not officially supported.\n",
+                   ar->hw->wiphy->fw_version);
+       ath10k_warn("Please upgrade to version %s (or newer)\n", version);
+
+       return 0;
+}
+
+static int ath10k_init_connect_htc(struct ath10k *ar)
+{
+       int status;
+
+       status = ath10k_wmi_connect_htc_service(ar);
+       if (status)
+               goto conn_fail;
+
+       /* Start HTC */
+       status = ath10k_htc_start(ar->htc);
+       if (status)
+               goto conn_fail;
+
+       /* Wait for WMI event to be ready */
+       status = ath10k_wmi_wait_for_service_ready(ar);
+       if (status <= 0) {
+               ath10k_warn("wmi service ready event not received");
+               status = -ETIMEDOUT;
+               goto timeout;
+       }
+
+       ath10k_dbg(ATH10K_DBG_CORE, "core wmi ready\n");
+       return 0;
+
+timeout:
+       ath10k_htc_stop(ar->htc);
+conn_fail:
+       return status;
+}
+
+static int ath10k_init_configure_target(struct ath10k *ar)
+{
+       u32 param_host;
+       int ret;
+
+       /* tell the target which HTC version is used */
+       ret = ath10k_bmi_write32(ar, hi_app_host_interest,
+                                HTC_PROTOCOL_VERSION);
+       if (ret) {
+               ath10k_err("setting HTC version failed\n");
+               return ret;
+       }
+
+       /* set the firmware mode to STA/IBSS/AP */
+       ret = ath10k_bmi_read32(ar, hi_option_flag, &param_host);
+       if (ret) {
+               ath10k_err("setting firmware mode (1/2) failed\n");
+               return ret;
+       }
+
+       /* TODO following parameters need to be re-visited. */
+       /* num_device */
+       param_host |= (1 << HI_OPTION_NUM_DEV_SHIFT);
+       /* Firmware mode */
+       /* FIXME: Why FW_MODE_AP ??.*/
+       param_host |= (HI_OPTION_FW_MODE_AP << HI_OPTION_FW_MODE_SHIFT);
+       /* mac_addr_method */
+       param_host |= (1 << HI_OPTION_MAC_ADDR_METHOD_SHIFT);
+       /* firmware_bridge */
+       param_host |= (0 << HI_OPTION_FW_BRIDGE_SHIFT);
+       /* fwsubmode */
+       param_host |= (0 << HI_OPTION_FW_SUBMODE_SHIFT);
+
+       ret = ath10k_bmi_write32(ar, hi_option_flag, param_host);
+       if (ret) {
+               ath10k_err("setting firmware mode (2/2) failed\n");
+               return ret;
+       }
+
+       /* We do all byte-swapping on the host */
+       ret = ath10k_bmi_write32(ar, hi_be, 0);
+       if (ret) {
+               ath10k_err("setting host CPU BE mode failed\n");
+               return ret;
+       }
+
+       /* FW descriptor/Data swap flags */
+       ret = ath10k_bmi_write32(ar, hi_fw_swap, 0);
+
+       if (ret) {
+               ath10k_err("setting FW data/desc swap flags failed\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar,
+                                                  const char *dir,
+                                                  const char *file)
+{
+       char filename[100];
+       const struct firmware *fw;
+       int ret;
+
+       if (file == NULL)
+               return ERR_PTR(-ENOENT);
+
+       if (dir == NULL)
+               dir = ".";
+
+       snprintf(filename, sizeof(filename), "%s/%s", dir, file);
+       ret = request_firmware(&fw, filename, ar->dev);
+       if (ret)
+               return ERR_PTR(ret);
+
+       return fw;
+}
+
+static int ath10k_push_board_ext_data(struct ath10k *ar,
+                                     const struct firmware *fw)
+{
+       u32 board_data_size = QCA988X_BOARD_DATA_SZ;
+       u32 board_ext_data_size = QCA988X_BOARD_EXT_DATA_SZ;
+       u32 board_ext_data_addr;
+       int ret;
+
+       ret = ath10k_bmi_read32(ar, hi_board_ext_data, &board_ext_data_addr);
+       if (ret) {
+               ath10k_err("could not read board ext data addr (%d)\n", ret);
+               return ret;
+       }
+
+       ath10k_dbg(ATH10K_DBG_CORE,
+                  "ath10k: Board extended Data download addr: 0x%x\n",
+                  board_ext_data_addr);
+
+       if (board_ext_data_addr == 0)
+               return 0;
+
+       if (fw->size != (board_data_size + board_ext_data_size)) {
+               ath10k_err("invalid board (ext) data sizes %zu != %d+%d\n",
+                          fw->size, board_data_size, board_ext_data_size);
+               return -EINVAL;
+       }
+
+       ret = ath10k_bmi_write_memory(ar, board_ext_data_addr,
+                                     fw->data + board_data_size,
+                                     board_ext_data_size);
+       if (ret) {
+               ath10k_err("could not write board ext data (%d)\n", ret);
+               return ret;
+       }
+
+       ret = ath10k_bmi_write32(ar, hi_board_ext_data_config,
+                                (board_ext_data_size << 16) | 1);
+       if (ret) {
+               ath10k_err("could not write board ext data bit (%d)\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int ath10k_download_board_data(struct ath10k *ar)
+{
+       u32 board_data_size = QCA988X_BOARD_DATA_SZ;
+       u32 address;
+       const struct firmware *fw;
+       int ret;
+
+       fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
+                                 ar->hw_params.fw.board);
+       if (IS_ERR(fw)) {
+               ath10k_err("could not fetch board data fw file (%ld)\n",
+                          PTR_ERR(fw));
+               return PTR_ERR(fw);
+       }
+
+       ret = ath10k_push_board_ext_data(ar, fw);
+       if (ret) {
+               ath10k_err("could not push board ext data (%d)\n", ret);
+               goto exit;
+       }
+
+       ret = ath10k_bmi_read32(ar, hi_board_data, &address);
+       if (ret) {
+               ath10k_err("could not read board data addr (%d)\n", ret);
+               goto exit;
+       }
+
+       ret = ath10k_bmi_write_memory(ar, address, fw->data,
+                                     min_t(u32, board_data_size, fw->size));
+       if (ret) {
+               ath10k_err("could not write board data (%d)\n", ret);
+               goto exit;
+       }
+
+       ret = ath10k_bmi_write32(ar, hi_board_data_initialized, 1);
+       if (ret) {
+               ath10k_err("could not write board data bit (%d)\n", ret);
+               goto exit;
+       }
+
+exit:
+       release_firmware(fw);
+       return ret;
+}
+
+static int ath10k_download_and_run_otp(struct ath10k *ar)
+{
+       const struct firmware *fw;
+       u32 address;
+       u32 exec_param;
+       int ret;
+
+       /* OTP is optional */
+
+       if (ar->hw_params.fw.otp == NULL) {
+               ath10k_info("otp file not defined\n");
+               return 0;
+       }
+
+       address = ar->hw_params.patch_load_addr;
+
+       fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
+                                 ar->hw_params.fw.otp);
+       if (IS_ERR(fw)) {
+               ath10k_warn("could not fetch otp (%ld)\n", PTR_ERR(fw));
+               return 0;
+       }
+
+       ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size);
+       if (ret) {
+               ath10k_err("could not write otp (%d)\n", ret);
+               goto exit;
+       }
+
+       exec_param = 0;
+       ret = ath10k_bmi_execute(ar, address, &exec_param);
+       if (ret) {
+               ath10k_err("could not execute otp (%d)\n", ret);
+               goto exit;
+       }
+
+exit:
+       release_firmware(fw);
+       return ret;
+}
+
+static int ath10k_download_fw(struct ath10k *ar)
+{
+       const struct firmware *fw;
+       u32 address;
+       int ret;
+
+       if (ar->hw_params.fw.fw == NULL)
+               return -EINVAL;
+
+       address = ar->hw_params.patch_load_addr;
+
+       fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
+                                 ar->hw_params.fw.fw);
+       if (IS_ERR(fw)) {
+               ath10k_err("could not fetch fw (%ld)\n", PTR_ERR(fw));
+               return PTR_ERR(fw);
+       }
+
+       ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size);
+       if (ret) {
+               ath10k_err("could not write fw (%d)\n", ret);
+               goto exit;
+       }
+
+exit:
+       release_firmware(fw);
+       return ret;
+}
+
+static int ath10k_init_download_firmware(struct ath10k *ar)
+{
+       int ret;
+
+       ret = ath10k_download_board_data(ar);
+       if (ret)
+               return ret;
+
+       ret = ath10k_download_and_run_otp(ar);
+       if (ret)
+               return ret;
+
+       ret = ath10k_download_fw(ar);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int ath10k_init_uart(struct ath10k *ar)
+{
+       int ret;
+
+       /*
+        * Explicitly set UART prints to zero, since the target turns them
+        * on based on scratch registers.
+        */
+       ret = ath10k_bmi_write32(ar, hi_serial_enable, 0);
+       if (ret) {
+               ath10k_warn("could not disable UART prints (%d)\n", ret);
+               return ret;
+       }
+
+       if (!uart_print) {
+               ath10k_info("UART prints disabled\n");
+               return 0;
+       }
+
+       ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, 7);
+       if (ret) {
+               ath10k_warn("could not enable UART prints (%d)\n", ret);
+               return ret;
+       }
+
+       ret = ath10k_bmi_write32(ar, hi_serial_enable, 1);
+       if (ret) {
+               ath10k_warn("could not enable UART prints (%d)\n", ret);
+               return ret;
+       }
+
+       ath10k_info("UART prints enabled\n");
+       return 0;
+}
+
+static int ath10k_init_hw_params(struct ath10k *ar)
+{
+       const struct ath10k_hw_params *uninitialized_var(hw_params);
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(ath10k_hw_params_list); i++) {
+               hw_params = &ath10k_hw_params_list[i];
+
+               if (hw_params->id == ar->target_version)
+                       break;
+       }
+
+       if (i == ARRAY_SIZE(ath10k_hw_params_list)) {
+               ath10k_err("Unsupported hardware version: 0x%x\n",
+                          ar->target_version);
+               return -EINVAL;
+       }
+
+       ar->hw_params = *hw_params;
+
+       ath10k_info("Hardware name %s version 0x%x\n",
+                   ar->hw_params.name, ar->target_version);
+
+       return 0;
+}
+
+struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
+                                 enum ath10k_bus bus,
+                                 const struct ath10k_hif_ops *hif_ops)
+{
+       struct ath10k *ar;
+
+       ar = ath10k_mac_create();
+       if (!ar)
+               return NULL;
+
+       ar->ath_common.priv = ar;
+       ar->ath_common.hw = ar->hw;
+
+       ar->p2p = !!ath10k_p2p;
+       ar->dev = dev;
+
+       ar->hif.priv = hif_priv;
+       ar->hif.ops = hif_ops;
+       ar->hif.bus = bus;
+
+       ar->free_vdev_map = 0xFF; /* 8 vdevs */
+
+       init_completion(&ar->scan.started);
+       init_completion(&ar->scan.completed);
+       init_completion(&ar->scan.on_channel);
+
+       init_completion(&ar->install_key_done);
+       init_completion(&ar->vdev_setup_done);
+
+       setup_timer(&ar->scan.timeout, ath10k_reset_scan, (unsigned long)ar);
+
+       ar->workqueue = create_singlethread_workqueue("ath10k_wq");
+       if (!ar->workqueue)
+               goto err_wq;
+
+       mutex_init(&ar->conf_mutex);
+       spin_lock_init(&ar->data_lock);
+
+       INIT_LIST_HEAD(&ar->peers);
+       init_waitqueue_head(&ar->peer_mapping_wq);
+
+       init_completion(&ar->offchan_tx_completed);
+       INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work);
+       skb_queue_head_init(&ar->offchan_tx_queue);
+
+       init_waitqueue_head(&ar->event_queue);
+
+       return ar;
+
+err_wq:
+       ath10k_mac_destroy(ar);
+       return NULL;
+}
+EXPORT_SYMBOL(ath10k_core_create);
+
+void ath10k_core_destroy(struct ath10k *ar)
+{
+       flush_workqueue(ar->workqueue);
+       destroy_workqueue(ar->workqueue);
+
+       ath10k_mac_destroy(ar);
+}
+EXPORT_SYMBOL(ath10k_core_destroy);
+
+int ath10k_core_register(struct ath10k *ar)
+{
+       struct ath10k_htc_ops htc_ops;
+       struct bmi_target_info target_info;
+       int status;
+
+       memset(&target_info, 0, sizeof(target_info));
+       status = ath10k_bmi_get_target_info(ar, &target_info);
+       if (status)
+               goto err;
+
+       ar->target_version = target_info.version;
+       ar->hw->wiphy->hw_version = target_info.version;
+
+       status = ath10k_init_hw_params(ar);
+       if (status)
+               goto err;
+
+       if (ath10k_init_configure_target(ar)) {
+               status = -EINVAL;
+               goto err;
+       }
+
+       status = ath10k_init_download_firmware(ar);
+       if (status)
+               goto err;
+
+       status = ath10k_init_uart(ar);
+       if (status)
+               goto err;
+
+       htc_ops.target_send_suspend_complete = ath10k_send_suspend_complete;
+
+       ar->htc = ath10k_htc_create(ar, &htc_ops);
+       if (IS_ERR(ar->htc)) {
+               status = PTR_ERR(ar->htc);
+               ath10k_err("could not create HTC (%d)\n", status);
+               goto err;
+       }
+
+       status = ath10k_bmi_done(ar);
+       if (status)
+               goto err_htc_destroy;
+
+       status = ath10k_wmi_attach(ar);
+       if (status) {
+               ath10k_err("WMI attach failed: %d\n", status);
+               goto err_htc_destroy;
+       }
+
+       status = ath10k_htc_wait_target(ar->htc);
+       if (status)
+               goto err_wmi_detach;
+
+       ar->htt = ath10k_htt_attach(ar);
+       if (!ar->htt) {
+               status = -ENOMEM;
+               goto err_wmi_detach;
+       }
+
+       status = ath10k_init_connect_htc(ar);
+       if (status)
+               goto err_htt_detach;
+
+       ath10k_info("firmware %s booted\n", ar->hw->wiphy->fw_version);
+
+       status = ath10k_check_fw_version(ar);
+       if (status)
+               goto err_disconnect_htc;
+
+       status = ath10k_wmi_cmd_init(ar);
+       if (status) {
+               ath10k_err("could not send WMI init command (%d)\n", status);
+               goto err_disconnect_htc;
+       }
+
+       status = ath10k_wmi_wait_for_unified_ready(ar);
+       if (status <= 0) {
+               ath10k_err("wmi unified ready event not received\n");
+               status = -ETIMEDOUT;
+               goto err_disconnect_htc;
+       }
+
+       status = ath10k_htt_attach_target(ar->htt);
+       if (status)
+               goto err_disconnect_htc;
+
+       status = ath10k_mac_register(ar);
+       if (status)
+               goto err_disconnect_htc;
+
+       status = ath10k_debug_create(ar);
+       if (status) {
+               ath10k_err("unable to initialize debugfs\n");
+               goto err_unregister_mac;
+       }
+
+       return 0;
+
+err_unregister_mac:
+       ath10k_mac_unregister(ar);
+err_disconnect_htc:
+       ath10k_htc_stop(ar->htc);
+err_htt_detach:
+       ath10k_htt_detach(ar->htt);
+err_wmi_detach:
+       ath10k_wmi_detach(ar);
+err_htc_destroy:
+       ath10k_htc_destroy(ar->htc);
+err:
+       return status;
+}
+EXPORT_SYMBOL(ath10k_core_register);
+
+void ath10k_core_unregister(struct ath10k *ar)
+{
+       /* We must unregister from mac80211 before we stop HTC and HIF.
+        * Otherwise we will fail to submit commands to FW and mac80211 will be
+        * unhappy about callback failures. */
+       ath10k_mac_unregister(ar);
+       ath10k_htc_stop(ar->htc);
+       ath10k_htt_detach(ar->htt);
+       ath10k_wmi_detach(ar);
+       ath10k_htc_destroy(ar->htc);
+}
+EXPORT_SYMBOL(ath10k_core_unregister);
+
+int ath10k_core_target_suspend(struct ath10k *ar)
+{
+       int ret;
+
+       ath10k_dbg(ATH10K_DBG_CORE, "%s: called\n", __func__);
+
+       ret = ath10k_wmi_pdev_suspend_target(ar);
+       if (ret)
+               ath10k_warn("could not suspend target (%d)\n", ret);
+
+       return ret;
+}
+EXPORT_SYMBOL(ath10k_core_target_suspend);
+
+int ath10k_core_target_resume(struct ath10k *ar)
+{
+       int ret;
+
+       ath10k_dbg(ATH10K_DBG_CORE, "%s: called\n", __func__);
+
+       ret = ath10k_wmi_pdev_resume_target(ar);
+       if (ret)
+               ath10k_warn("could not resume target (%d)\n", ret);
+
+       return ret;
+}
+EXPORT_SYMBOL(ath10k_core_target_resume);
+
+MODULE_AUTHOR("Qualcomm Atheros");
+MODULE_DESCRIPTION("Core module for QCA988X PCIe devices.");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
new file mode 100644 (file)
index 0000000..539336d
--- /dev/null
@@ -0,0 +1,369 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _CORE_H_
+#define _CORE_H_
+
+#include <linux/completion.h>
+#include <linux/if_ether.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+
+#include "htc.h"
+#include "hw.h"
+#include "targaddrs.h"
+#include "wmi.h"
+#include "../ath.h"
+#include "../regd.h"
+
+#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
+#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
+#define WO(_f)      ((_f##_OFFSET) >> 2)
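+
+/*
+ * Usage sketch (illustrative only; FOO is a hypothetical field): with
+ * FOO_MASK 0x0000ff00 and FOO_LSB 8,
+ *
+ *   MS(reg, FOO)  ==  (reg & 0x0000ff00) >> 8    extract the field
+ *   SM(val, FOO)  ==  (val << 8) & 0x0000ff00    shift it back into place
+ *
+ * and WO(FOO) turns a byte offset FOO_OFFSET into a 32-bit word index.
+ */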
+
+#define ATH10K_SCAN_ID 0
+#define WMI_READY_TIMEOUT (5 * HZ)
+#define ATH10K_FLUSH_TIMEOUT_HZ (5 * HZ)
+
+/* Antenna noise floor */
+#define ATH10K_DEFAULT_NOISE_FLOOR -95
+
+struct ath10k;
+
+enum ath10k_bus {
+       ATH10K_BUS_PCI,
+};
+
+struct ath10k_skb_cb {
+       dma_addr_t paddr;
+       bool is_mapped;
+       bool is_aborted;
+
+       struct {
+               u8 vdev_id;
+               u16 msdu_id;
+               u8 tid;
+               bool is_offchan;
+               bool is_conf;
+               bool discard;
+               bool no_ack;
+               u8 refcount;
+               struct sk_buff *txfrag;
+               struct sk_buff *msdu;
+       } __packed htt;
+
+       /* 4 bytes left on 64bit arch */
+} __packed;
+
+static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
+{
+       BUILD_BUG_ON(sizeof(struct ath10k_skb_cb) >
+                    IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
+       return (struct ath10k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data;
+}
+
+static inline int ath10k_skb_map(struct device *dev, struct sk_buff *skb)
+{
+       if (ATH10K_SKB_CB(skb)->is_mapped)
+               return -EINVAL;
+
+       ATH10K_SKB_CB(skb)->paddr = dma_map_single(dev, skb->data, skb->len,
+                                                  DMA_TO_DEVICE);
+
+       if (unlikely(dma_mapping_error(dev, ATH10K_SKB_CB(skb)->paddr)))
+               return -EIO;
+
+       ATH10K_SKB_CB(skb)->is_mapped = true;
+       return 0;
+}
+
+static inline int ath10k_skb_unmap(struct device *dev, struct sk_buff *skb)
+{
+       if (!ATH10K_SKB_CB(skb)->is_mapped)
+               return -EINVAL;
+
+       dma_unmap_single(dev, ATH10K_SKB_CB(skb)->paddr, skb->len,
+                        DMA_TO_DEVICE);
+       ATH10K_SKB_CB(skb)->is_mapped = false;
+       return 0;
+}
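+
+/*
+ * Typical usage sketch (illustrative only): a buffer is DMA-mapped before
+ * it is handed to the HIF layer and unmapped again in the TX completion
+ * path:
+ *
+ *   ret = ath10k_skb_map(ar->dev, skb);   // fills cb->paddr, sets is_mapped
+ *   if (ret)
+ *           return ret;
+ *   ...pass skb to HIF...
+ *   ath10k_skb_unmap(ar->dev, skb);       // on completion
+ */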
+
+static inline u32 host_interest_item_address(u32 item_offset)
+{
+       return QCA988X_HOST_INTEREST_ADDRESS + item_offset;
+}
+
+struct ath10k_bmi {
+       bool done_sent;
+};
+
+struct ath10k_wmi {
+       enum ath10k_htc_ep_id eid;
+       struct completion service_ready;
+       struct completion unified_ready;
+       atomic_t pending_tx_count;
+       wait_queue_head_t wq;
+
+       struct sk_buff_head wmi_event_list;
+       struct work_struct wmi_event_work;
+};
+
+struct ath10k_peer_stat {
+       u8 peer_macaddr[ETH_ALEN];
+       u32 peer_rssi;
+       u32 peer_tx_rate;
+};
+
+struct ath10k_target_stats {
+       /* PDEV stats */
+       s32 ch_noise_floor;
+       u32 tx_frame_count;
+       u32 rx_frame_count;
+       u32 rx_clear_count;
+       u32 cycle_count;
+       u32 phy_err_count;
+       u32 chan_tx_power;
+
+       /* PDEV TX stats */
+       s32 comp_queued;
+       s32 comp_delivered;
+       s32 msdu_enqued;
+       s32 mpdu_enqued;
+       s32 wmm_drop;
+       s32 local_enqued;
+       s32 local_freed;
+       s32 hw_queued;
+       s32 hw_reaped;
+       s32 underrun;
+       s32 tx_abort;
+       s32 mpdus_requed;
+       u32 tx_ko;
+       u32 data_rc;
+       u32 self_triggers;
+       u32 sw_retry_failure;
+       u32 illgl_rate_phy_err;
+       u32 pdev_cont_xretry;
+       u32 pdev_tx_timeout;
+       u32 pdev_resets;
+       u32 phy_underrun;
+       u32 txop_ovf;
+
+       /* PDEV RX stats */
+       s32 mid_ppdu_route_change;
+       s32 status_rcvd;
+       s32 r0_frags;
+       s32 r1_frags;
+       s32 r2_frags;
+       s32 r3_frags;
+       s32 htt_msdus;
+       s32 htt_mpdus;
+       s32 loc_msdus;
+       s32 loc_mpdus;
+       s32 oversize_amsdu;
+       s32 phy_errs;
+       s32 phy_err_drop;
+       s32 mpdu_errs;
+
+       /* VDEV STATS */
+
+       /* PEER STATS */
+       u8 peers;
+       struct ath10k_peer_stat peer_stat[TARGET_NUM_PEERS];
+
+       /* TODO: Beacon filter stats */
+
+};
+
+#define ATH10K_MAX_NUM_PEER_IDS (1 << 11) /* htt rx_desc limit */
+
+struct ath10k_peer {
+       struct list_head list;
+       int vdev_id;
+       u8 addr[ETH_ALEN];
+       DECLARE_BITMAP(peer_ids, ATH10K_MAX_NUM_PEER_IDS);
+       struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
+};
+
+#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5 * HZ)
+
+struct ath10k_vif {
+       u32 vdev_id;
+       enum wmi_vdev_type vdev_type;
+       enum wmi_vdev_subtype vdev_subtype;
+       u32 beacon_interval;
+       u32 dtim_period;
+
+       struct ath10k *ar;
+       struct ieee80211_vif *vif;
+
+       struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1];
+       u8 def_wep_key_index;
+
+       u16 tx_seq_no;
+
+       union {
+               struct {
+                       u8 bssid[ETH_ALEN];
+                       u32 uapsd;
+               } sta;
+               struct {
+                       /* 127 stations; wmi limit */
+                       u8 tim_bitmap[16];
+                       u8 tim_len;
+                       u32 ssid_len;
+                       u8 ssid[IEEE80211_MAX_SSID_LEN];
+                       bool hidden_ssid;
+                       /* P2P_IE with NoA attribute for P2P_GO case */
+                       u32 noa_len;
+                       u8 *noa_data;
+               } ap;
+               struct {
+                       u8 bssid[ETH_ALEN];
+               } ibss;
+       } u;
+};
+
+struct ath10k_vif_iter {
+       u32 vdev_id;
+       struct ath10k_vif *arvif;
+};
+
+struct ath10k_debug {
+       struct dentry *debugfs_phy;
+
+       struct ath10k_target_stats target_stats;
+       u32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE];
+
+       struct completion event_stats_compl;
+};
+
+struct ath10k {
+       struct ath_common ath_common;
+       struct ieee80211_hw *hw;
+       struct device *dev;
+       u8 mac_addr[ETH_ALEN];
+
+       u32 target_version;
+       u8 fw_version_major;
+       u32 fw_version_minor;
+       u16 fw_version_release;
+       u16 fw_version_build;
+       u32 phy_capability;
+       u32 hw_min_tx_power;
+       u32 hw_max_tx_power;
+       u32 ht_cap_info;
+       u32 vht_cap_info;
+
+       struct targetdef *targetdef;
+       struct hostdef *hostdef;
+
+       bool p2p;
+
+       struct {
+               void *priv;
+               enum ath10k_bus bus;
+               const struct ath10k_hif_ops *ops;
+       } hif;
+
+       struct ath10k_wmi wmi;
+
+       wait_queue_head_t event_queue;
+       bool is_target_paused;
+
+       struct ath10k_bmi bmi;
+
+       struct ath10k_htc *htc;
+       struct ath10k_htt *htt;
+
+       struct ath10k_hw_params {
+               u32 id;
+               const char *name;
+               u32 patch_load_addr;
+
+               struct ath10k_hw_params_fw {
+                       const char *dir;
+                       const char *fw;
+                       const char *otp;
+                       const char *board;
+               } fw;
+       } hw_params;
+
+       struct {
+               struct completion started;
+               struct completion completed;
+               struct completion on_channel;
+               struct timer_list timeout;
+               bool is_roc;
+               bool in_progress;
+               bool aborting;
+               int vdev_id;
+               int roc_freq;
+       } scan;
+
+       struct {
+               struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
+       } mac;
+
+       /* should never be NULL; needed for regular htt rx */
+       struct ieee80211_channel *rx_channel;
+
+       /* valid during scan; needed for mgmt rx during scan */
+       struct ieee80211_channel *scan_channel;
+
+       int free_vdev_map;
+       int monitor_vdev_id;
+       bool monitor_enabled;
+       bool monitor_present;
+       unsigned int filter_flags;
+
+       struct wmi_pdev_set_wmm_params_arg wmm_params;
+       struct completion install_key_done;
+
+       struct completion vdev_setup_done;
+
+       struct workqueue_struct *workqueue;
+
+       /* prevents concurrent FW reconfiguration */
+       struct mutex conf_mutex;
+
+       /* protects shared structure data */
+       spinlock_t data_lock;
+
+       struct list_head peers;
+       wait_queue_head_t peer_mapping_wq;
+
+       struct work_struct offchan_tx_work;
+       struct sk_buff_head offchan_tx_queue;
+       struct completion offchan_tx_completed;
+       struct sk_buff *offchan_tx_skb;
+
+#ifdef CONFIG_ATH10K_DEBUGFS
+       struct ath10k_debug debug;
+#endif
+};
+
+struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
+                                 enum ath10k_bus bus,
+                                 const struct ath10k_hif_ops *hif_ops);
+void ath10k_core_destroy(struct ath10k *ar);
+
+int ath10k_core_register(struct ath10k *ar);
+void ath10k_core_unregister(struct ath10k *ar);
+
+int ath10k_core_target_suspend(struct ath10k *ar);
+int ath10k_core_target_resume(struct ath10k *ar);
+
+#endif /* _CORE_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
new file mode 100644 (file)
index 0000000..499034b
--- /dev/null
@@ -0,0 +1,503 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/debugfs.h>
+
+#include "core.h"
+#include "debug.h"
+
+static int ath10k_printk(const char *level, const char *fmt, ...)
+{
+       struct va_format vaf;
+       va_list args;
+       int rtn;
+
+       va_start(args, fmt);
+
+       vaf.fmt = fmt;
+       vaf.va = &args;
+
+       rtn = printk("%sath10k: %pV", level, &vaf);
+
+       va_end(args);
+
+       return rtn;
+}
+
+int ath10k_info(const char *fmt, ...)
+{
+       struct va_format vaf = {
+               .fmt = fmt,
+       };
+       va_list args;
+       int ret;
+
+       va_start(args, fmt);
+       vaf.va = &args;
+       ret = ath10k_printk(KERN_INFO, "%pV", &vaf);
+       trace_ath10k_log_info(&vaf);
+       va_end(args);
+
+       return ret;
+}
+EXPORT_SYMBOL(ath10k_info);
+
+int ath10k_err(const char *fmt, ...)
+{
+       struct va_format vaf = {
+               .fmt = fmt,
+       };
+       va_list args;
+       int ret;
+
+       va_start(args, fmt);
+       vaf.va = &args;
+       ret = ath10k_printk(KERN_ERR, "%pV", &vaf);
+       trace_ath10k_log_err(&vaf);
+       va_end(args);
+
+       return ret;
+}
+EXPORT_SYMBOL(ath10k_err);
+
+int ath10k_warn(const char *fmt, ...)
+{
+       struct va_format vaf = {
+               .fmt = fmt,
+       };
+       va_list args;
+       int ret = 0;
+
+       va_start(args, fmt);
+       vaf.va = &args;
+
+       if (net_ratelimit())
+               ret = ath10k_printk(KERN_WARNING, "%pV", &vaf);
+
+       trace_ath10k_log_warn(&vaf);
+
+       va_end(args);
+
+       return ret;
+}
+EXPORT_SYMBOL(ath10k_warn);
+
+#ifdef CONFIG_ATH10K_DEBUGFS
+
+void ath10k_debug_read_service_map(struct ath10k *ar,
+                                  void *service_map,
+                                  size_t map_size)
+{
+       memcpy(ar->debug.wmi_service_bitmap, service_map, map_size);
+}
+
+static ssize_t ath10k_read_wmi_services(struct file *file,
+                                       char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct ath10k *ar = file->private_data;
+       char *buf;
+       unsigned int len = 0, buf_len = 1500;
+       const char *status;
+       ssize_t ret_cnt;
+       int i;
+
+       buf = kzalloc(buf_len, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       mutex_lock(&ar->conf_mutex);
+
+       for (i = 0; i < WMI_SERVICE_LAST; i++) {
+               if (WMI_SERVICE_IS_ENABLED(ar->debug.wmi_service_bitmap, i))
+                       status = "enabled";
+               else
+                       status = "disabled";
+
+               len += scnprintf(buf + len, buf_len - len,
+                                "0x%02x - %20s - %s\n",
+                                i, wmi_service_name(i), status);
+       }
+
+       if (len > buf_len)
+               len = buf_len;
+
+       ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+       mutex_unlock(&ar->conf_mutex);
+
+       kfree(buf);
+       return ret_cnt;
+}
+
+static const struct file_operations fops_wmi_services = {
+       .read = ath10k_read_wmi_services,
+       .open = simple_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
+void ath10k_debug_read_target_stats(struct ath10k *ar,
+                                   struct wmi_stats_event *ev)
+{
+       u8 *tmp = ev->data;
+       struct ath10k_target_stats *stats;
+       int num_pdev_stats, num_vdev_stats, num_peer_stats;
+       struct wmi_pdev_stats *ps;
+       int i;
+
+       mutex_lock(&ar->conf_mutex);
+
+       stats = &ar->debug.target_stats;
+
+       num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); /* 0 or 1 */
+       num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); /* 0 or max vdevs */
+       num_peer_stats = __le32_to_cpu(ev->num_peer_stats); /* 0 or max peers */
+
+       if (num_pdev_stats) {
+               ps = (struct wmi_pdev_stats *)tmp;
+
+               stats->ch_noise_floor = __le32_to_cpu(ps->chan_nf);
+               stats->tx_frame_count = __le32_to_cpu(ps->tx_frame_count);
+               stats->rx_frame_count = __le32_to_cpu(ps->rx_frame_count);
+               stats->rx_clear_count = __le32_to_cpu(ps->rx_clear_count);
+               stats->cycle_count = __le32_to_cpu(ps->cycle_count);
+               stats->phy_err_count = __le32_to_cpu(ps->phy_err_count);
+               stats->chan_tx_power = __le32_to_cpu(ps->chan_tx_pwr);
+
+               stats->comp_queued = __le32_to_cpu(ps->wal.tx.comp_queued);
+               stats->comp_delivered =
+                       __le32_to_cpu(ps->wal.tx.comp_delivered);
+               stats->msdu_enqued = __le32_to_cpu(ps->wal.tx.msdu_enqued);
+               stats->mpdu_enqued = __le32_to_cpu(ps->wal.tx.mpdu_enqued);
+               stats->wmm_drop = __le32_to_cpu(ps->wal.tx.wmm_drop);
+               stats->local_enqued = __le32_to_cpu(ps->wal.tx.local_enqued);
+               stats->local_freed = __le32_to_cpu(ps->wal.tx.local_freed);
+               stats->hw_queued = __le32_to_cpu(ps->wal.tx.hw_queued);
+               stats->hw_reaped = __le32_to_cpu(ps->wal.tx.hw_reaped);
+               stats->underrun = __le32_to_cpu(ps->wal.tx.underrun);
+               stats->tx_abort = __le32_to_cpu(ps->wal.tx.tx_abort);
+               stats->mpdus_requed = __le32_to_cpu(ps->wal.tx.mpdus_requed);
+               stats->tx_ko = __le32_to_cpu(ps->wal.tx.tx_ko);
+               stats->data_rc = __le32_to_cpu(ps->wal.tx.data_rc);
+               stats->self_triggers = __le32_to_cpu(ps->wal.tx.self_triggers);
+               stats->sw_retry_failure =
+                       __le32_to_cpu(ps->wal.tx.sw_retry_failure);
+               stats->illgl_rate_phy_err =
+                       __le32_to_cpu(ps->wal.tx.illgl_rate_phy_err);
+               stats->pdev_cont_xretry =
+                       __le32_to_cpu(ps->wal.tx.pdev_cont_xretry);
+               stats->pdev_tx_timeout =
+                       __le32_to_cpu(ps->wal.tx.pdev_tx_timeout);
+               stats->pdev_resets = __le32_to_cpu(ps->wal.tx.pdev_resets);
+               stats->phy_underrun = __le32_to_cpu(ps->wal.tx.phy_underrun);
+               stats->txop_ovf = __le32_to_cpu(ps->wal.tx.txop_ovf);
+
+               stats->mid_ppdu_route_change =
+                       __le32_to_cpu(ps->wal.rx.mid_ppdu_route_change);
+               stats->status_rcvd = __le32_to_cpu(ps->wal.rx.status_rcvd);
+               stats->r0_frags = __le32_to_cpu(ps->wal.rx.r0_frags);
+               stats->r1_frags = __le32_to_cpu(ps->wal.rx.r1_frags);
+               stats->r2_frags = __le32_to_cpu(ps->wal.rx.r2_frags);
+               stats->r3_frags = __le32_to_cpu(ps->wal.rx.r3_frags);
+               stats->htt_msdus = __le32_to_cpu(ps->wal.rx.htt_msdus);
+               stats->htt_mpdus = __le32_to_cpu(ps->wal.rx.htt_mpdus);
+               stats->loc_msdus = __le32_to_cpu(ps->wal.rx.loc_msdus);
+               stats->loc_mpdus = __le32_to_cpu(ps->wal.rx.loc_mpdus);
+               stats->oversize_amsdu =
+                       __le32_to_cpu(ps->wal.rx.oversize_amsdu);
+               stats->phy_errs = __le32_to_cpu(ps->wal.rx.phy_errs);
+               stats->phy_err_drop = __le32_to_cpu(ps->wal.rx.phy_err_drop);
+               stats->mpdu_errs = __le32_to_cpu(ps->wal.rx.mpdu_errs);
+
+               tmp += sizeof(struct wmi_pdev_stats);
+       }
+
+       /* 0 or max vdevs */
+       /* Currently firmware does not support VDEV stats */
+       if (num_vdev_stats) {
+               struct wmi_vdev_stats *vdev_stats;
+
+               for (i = 0; i < num_vdev_stats; i++) {
+                       vdev_stats = (struct wmi_vdev_stats *)tmp;
+                       tmp += sizeof(struct wmi_vdev_stats);
+               }
+       }
+
+       if (num_peer_stats) {
+               struct wmi_peer_stats *peer_stats;
+               struct ath10k_peer_stat *s;
+
+               stats->peers = num_peer_stats;
+
+               for (i = 0; i < num_peer_stats; i++) {
+                       peer_stats = (struct wmi_peer_stats *)tmp;
+                       s = &stats->peer_stat[i];
+
+                       WMI_MAC_ADDR_TO_CHAR_ARRAY(&peer_stats->peer_macaddr,
+                                                  s->peer_macaddr);
+                       s->peer_rssi = __le32_to_cpu(peer_stats->peer_rssi);
+                       s->peer_tx_rate =
+                               __le32_to_cpu(peer_stats->peer_tx_rate);
+
+                       tmp += sizeof(struct wmi_peer_stats);
+               }
+       }
+
+       mutex_unlock(&ar->conf_mutex);
+       complete(&ar->debug.event_stats_compl);
+}
+
+static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
+                                   size_t count, loff_t *ppos)
+{
+       struct ath10k *ar = file->private_data;
+       struct ath10k_target_stats *fw_stats;
+       char *buf;
+       unsigned int len = 0, buf_len = 2500;
+       ssize_t ret_cnt;
+       long left;
+       int i;
+       int ret;
+
+       fw_stats = &ar->debug.target_stats;
+
+       buf = kzalloc(buf_len, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       ret = ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT);
+       if (ret) {
+               ath10k_warn("could not request stats (%d)\n", ret);
+               kfree(buf);
+               return -EIO;
+       }
+
+       left = wait_for_completion_timeout(&ar->debug.event_stats_compl, 1*HZ);
+
+       if (left <= 0) {
+               kfree(buf);
+               return -ETIMEDOUT;
+       }
+
+       mutex_lock(&ar->conf_mutex);
+
+       len += scnprintf(buf + len, buf_len - len, "\n");
+       len += scnprintf(buf + len, buf_len - len, "%30s\n",
+                        "ath10k PDEV stats");
+       len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+                        "=================");
+
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Channel noise floor", fw_stats->ch_noise_floor);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+                        "Channel TX power", fw_stats->chan_tx_power);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+                        "TX frame count", fw_stats->tx_frame_count);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+                        "RX frame count", fw_stats->rx_frame_count);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+                        "RX clear count", fw_stats->rx_clear_count);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+                        "Cycle count", fw_stats->cycle_count);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+                        "PHY error count", fw_stats->phy_err_count);
+
+       len += scnprintf(buf + len, buf_len - len, "\n");
+       len += scnprintf(buf + len, buf_len - len, "%30s\n",
+                        "ath10k PDEV TX stats");
+       len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+                        "=================");
+
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "HTT cookies queued", fw_stats->comp_queued);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "HTT cookies disp.", fw_stats->comp_delivered);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "MSDU queued", fw_stats->msdu_enqued);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "MPDU queued", fw_stats->mpdu_enqued);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "MSDUs dropped", fw_stats->wmm_drop);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Local enqued", fw_stats->local_enqued);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Local freed", fw_stats->local_freed);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "HW queued", fw_stats->hw_queued);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "PPDUs reaped", fw_stats->hw_reaped);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Num underruns", fw_stats->underrun);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "PPDUs cleaned", fw_stats->tx_abort);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "MPDUs requed", fw_stats->mpdus_requed);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Excessive retries", fw_stats->tx_ko);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "HW rate", fw_stats->data_rc);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Sched self triggers", fw_stats->self_triggers);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Dropped due to SW retries",
+                        fw_stats->sw_retry_failure);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Illegal rate phy errors",
+                        fw_stats->illgl_rate_phy_err);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "PDEV continuous xretry", fw_stats->pdev_cont_xretry);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "TX timeout", fw_stats->pdev_tx_timeout);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "PDEV resets", fw_stats->pdev_resets);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "PHY underrun", fw_stats->phy_underrun);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "MPDUs exceeding TXOP limit", fw_stats->txop_ovf);
+
+       len += scnprintf(buf + len, buf_len - len, "\n");
+       len += scnprintf(buf + len, buf_len - len, "%30s\n",
+                        "ath10k PDEV RX stats");
+       len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+                        "=================");
+
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Mid PPDU route change",
+                        fw_stats->mid_ppdu_route_change);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Tot. number of statuses", fw_stats->status_rcvd);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Extra frags on ring 0", fw_stats->r0_frags);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Extra frags on ring 1", fw_stats->r1_frags);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Extra frags on ring 2", fw_stats->r2_frags);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Extra frags on ring 3", fw_stats->r3_frags);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "MSDUs delivered to HTT", fw_stats->htt_msdus);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "MPDUs delivered to HTT", fw_stats->htt_mpdus);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "MSDUs delivered to stack", fw_stats->loc_msdus);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "MPDUs delivered to stack", fw_stats->loc_mpdus);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "Oversized A-MSDUs", fw_stats->oversize_amsdu);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "PHY errors", fw_stats->phy_errs);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "PHY error drops", fw_stats->phy_err_drop);
+       len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+                        "MPDU errors (FCS, MIC, ENC)", fw_stats->mpdu_errs);
+
+       len += scnprintf(buf + len, buf_len - len, "\n");
+       len += scnprintf(buf + len, buf_len - len, "%30s\n",
+                        "ath10k PEER stats");
+       len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+                        "=================");
+
+       for (i = 0; i < fw_stats->peers; i++) {
+               len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
+                                "Peer MAC address",
+                                fw_stats->peer_stat[i].peer_macaddr);
+               len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+                                "Peer RSSI", fw_stats->peer_stat[i].peer_rssi);
+               len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+                                "Peer TX rate",
+                                fw_stats->peer_stat[i].peer_tx_rate);
+               len += scnprintf(buf + len, buf_len - len, "\n");
+       }
+
+       if (len > buf_len)
+               len = buf_len;
+
+       ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+       mutex_unlock(&ar->conf_mutex);
+
+       kfree(buf);
+       return ret_cnt;
+}
+
+static const struct file_operations fops_fw_stats = {
+       .read = ath10k_read_fw_stats,
+       .open = simple_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
+int ath10k_debug_create(struct ath10k *ar)
+{
+       ar->debug.debugfs_phy = debugfs_create_dir("ath10k",
+                                                  ar->hw->wiphy->debugfsdir);
+
+       if (!ar->debug.debugfs_phy)
+               return -ENOMEM;
+
+       init_completion(&ar->debug.event_stats_compl);
+
+       debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar,
+                           &fops_fw_stats);
+
+       debugfs_create_file("wmi_services", S_IRUSR, ar->debug.debugfs_phy, ar,
+                           &fops_wmi_services);
+
+       return 0;
+}
+#endif /* CONFIG_ATH10K_DEBUGFS */
+
+#ifdef CONFIG_ATH10K_DEBUG
+void ath10k_dbg(enum ath10k_debug_mask mask, const char *fmt, ...)
+{
+       struct va_format vaf;
+       va_list args;
+
+       va_start(args, fmt);
+
+       vaf.fmt = fmt;
+       vaf.va = &args;
+
+       if (ath10k_debug_mask & mask)
+               ath10k_printk(KERN_DEBUG, "%pV", &vaf);
+
+       trace_ath10k_log_dbg(mask, &vaf);
+
+       va_end(args);
+}
+EXPORT_SYMBOL(ath10k_dbg);
+
+void ath10k_dbg_dump(enum ath10k_debug_mask mask,
+                    const char *msg, const char *prefix,
+                    const void *buf, size_t len)
+{
+       if (ath10k_debug_mask & mask) {
+               if (msg)
+                       ath10k_dbg(mask, "%s\n", msg);
+
+               print_hex_dump_bytes(prefix, DUMP_PREFIX_OFFSET, buf, len);
+       }
+
+       /* tracing code doesn't like null strings :/ */
+       trace_ath10k_log_dbg_dump(msg ? msg : "", prefix ? prefix : "",
+                                 buf, len);
+}
+EXPORT_SYMBOL(ath10k_dbg_dump);
+
+#endif /* CONFIG_ATH10K_DEBUG */
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
new file mode 100644 (file)
index 0000000..168140c
--- /dev/null
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _DEBUG_H_
+#define _DEBUG_H_
+
+#include <linux/types.h>
+#include "trace.h"
+
+enum ath10k_debug_mask {
+       ATH10K_DBG_PCI          = 0x00000001,
+       ATH10K_DBG_WMI          = 0x00000002,
+       ATH10K_DBG_HTC          = 0x00000004,
+       ATH10K_DBG_HTT          = 0x00000008,
+       ATH10K_DBG_MAC          = 0x00000010,
+       ATH10K_DBG_CORE         = 0x00000020,
+       ATH10K_DBG_PCI_DUMP     = 0x00000040,
+       ATH10K_DBG_HTT_DUMP     = 0x00000080,
+       ATH10K_DBG_MGMT         = 0x00000100,
+       ATH10K_DBG_DATA         = 0x00000200,
+       ATH10K_DBG_ANY          = 0xffffffff,
+};
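+
+/*
+ * The masks are meant to be OR-ed together; e.g. (ATH10K_DBG_WMI |
+ * ATH10K_DBG_CORE) == 0x00000022 enables WMI and CORE messages. How the
+ * mask is actually set (module parameter name etc.) is left to the core
+ * module and is not defined in this header.
+ */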
+
+extern unsigned int ath10k_debug_mask;
+
+extern __printf(1, 2) int ath10k_info(const char *fmt, ...);
+extern __printf(1, 2) int ath10k_err(const char *fmt, ...);
+extern __printf(1, 2) int ath10k_warn(const char *fmt, ...);
+
+#ifdef CONFIG_ATH10K_DEBUGFS
+int ath10k_debug_create(struct ath10k *ar);
+void ath10k_debug_read_service_map(struct ath10k *ar,
+                                  void *service_map,
+                                  size_t map_size);
+void ath10k_debug_read_target_stats(struct ath10k *ar,
+                                   struct wmi_stats_event *ev);
+
+#else
+static inline int ath10k_debug_create(struct ath10k *ar)
+{
+       return 0;
+}
+
+static inline void ath10k_debug_read_service_map(struct ath10k *ar,
+                                                void *service_map,
+                                                size_t map_size)
+{
+}
+
+static inline void ath10k_debug_read_target_stats(struct ath10k *ar,
+                                                 struct wmi_stats_event *ev)
+{
+}
+#endif /* CONFIG_ATH10K_DEBUGFS */
+
+#ifdef CONFIG_ATH10K_DEBUG
+extern __printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask,
+                                     const char *fmt, ...);
+void ath10k_dbg_dump(enum ath10k_debug_mask mask,
+                    const char *msg, const char *prefix,
+                    const void *buf, size_t len);
+#else /* CONFIG_ATH10K_DEBUG */
+
+static inline void ath10k_dbg(enum ath10k_debug_mask dbg_mask,
+                             const char *fmt, ...)
+{
+}
+
+static inline void ath10k_dbg_dump(enum ath10k_debug_mask mask,
+                                  const char *msg, const char *prefix,
+                                  const void *buf, size_t len)
+{
+}
+#endif /* CONFIG_ATH10K_DEBUG */
+#endif /* _DEBUG_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h
new file mode 100644 (file)
index 0000000..73a24d4
--- /dev/null
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _HIF_H_
+#define _HIF_H_
+
+#include <linux/kernel.h>
+#include "core.h"
+
+struct ath10k_hif_cb {
+       int (*tx_completion)(struct ath10k *ar,
+                            struct sk_buff *wbuf,
+                            unsigned transfer_id);
+       int (*rx_completion)(struct ath10k *ar,
+                            struct sk_buff *wbuf,
+                            u8 pipe_id);
+};
+
+struct ath10k_hif_ops {
+       /* Send the head of a buffer to HIF for transmission to the target. */
+       int (*send_head)(struct ath10k *ar, u8 pipe_id,
+                        unsigned int transfer_id,
+                        unsigned int nbytes,
+                        struct sk_buff *buf);
+
+       /*
+        * API to handle HIF-specific BMI message exchanges. This API is
+        * synchronous and may only be called from a context that can
+        * block (sleep).
+        */
+       int (*exchange_bmi_msg)(struct ath10k *ar,
+                               void *request, u32 request_len,
+                               void *response, u32 *response_len);
+
+       int (*start)(struct ath10k *ar);
+
+       void (*stop)(struct ath10k *ar);
+
+       int (*map_service_to_pipe)(struct ath10k *ar, u16 service_id,
+                                  u8 *ul_pipe, u8 *dl_pipe,
+                                  int *ul_is_polled, int *dl_is_polled);
+
+       void (*get_default_pipe)(struct ath10k *ar, u8 *ul_pipe, u8 *dl_pipe);
+
+       /*
+        * Check if prior sends have completed.
+        *
+        * Check whether the pipe in question has any completed
+        * sends that have not yet been processed.
+        * This function is only relevant for HIF pipes that are configured
+        * to be polled rather than interrupt-driven.
+        */
+       void (*send_complete_check)(struct ath10k *ar, u8 pipe_id, int force);
+
+       void (*init)(struct ath10k *ar,
+                    struct ath10k_hif_cb *callbacks);
+
+       u16 (*get_free_queue_number)(struct ath10k *ar, u8 pipe_id);
+};
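+
+/*
+ * Sketch of how a bus backend is expected to provide these ops (the names
+ * below are hypothetical and only illustrate the wiring):
+ *
+ *   static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
+ *           .send_head        = ath10k_pci_hif_send_head,
+ *           .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
+ *           .start            = ath10k_pci_hif_start,
+ *           .stop             = ath10k_pci_hif_stop,
+ *           ...
+ *   };
+ *
+ *   ar = ath10k_core_create(hif_priv, &pdev->dev, ATH10K_BUS_PCI,
+ *                           &ath10k_pci_hif_ops);
+ */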
+
+static inline int ath10k_hif_send_head(struct ath10k *ar, u8 pipe_id,
+                                      unsigned int transfer_id,
+                                      unsigned int nbytes,
+                                      struct sk_buff *buf)
+{
+       return ar->hif.ops->send_head(ar, pipe_id, transfer_id, nbytes, buf);
+}
+
+static inline int ath10k_hif_exchange_bmi_msg(struct ath10k *ar,
+                                             void *request, u32 request_len,
+                                             void *response, u32 *response_len)
+{
+       return ar->hif.ops->exchange_bmi_msg(ar, request, request_len,
+                                            response, response_len);
+}
+
+static inline int ath10k_hif_start(struct ath10k *ar)
+{
+       return ar->hif.ops->start(ar);
+}
+
+static inline void ath10k_hif_stop(struct ath10k *ar)
+{
+       return ar->hif.ops->stop(ar);
+}
+
+static inline int ath10k_hif_map_service_to_pipe(struct ath10k *ar,
+                                                u16 service_id,
+                                                u8 *ul_pipe, u8 *dl_pipe,
+                                                int *ul_is_polled,
+                                                int *dl_is_polled)
+{
+       return ar->hif.ops->map_service_to_pipe(ar, service_id,
+                                               ul_pipe, dl_pipe,
+                                               ul_is_polled, dl_is_polled);
+}
+
+static inline void ath10k_hif_get_default_pipe(struct ath10k *ar,
+                                              u8 *ul_pipe, u8 *dl_pipe)
+{
+       ar->hif.ops->get_default_pipe(ar, ul_pipe, dl_pipe);
+}
+
+static inline void ath10k_hif_send_complete_check(struct ath10k *ar,
+                                                 u8 pipe_id, int force)
+{
+       ar->hif.ops->send_complete_check(ar, pipe_id, force);
+}
+
+static inline void ath10k_hif_init(struct ath10k *ar,
+                                  struct ath10k_hif_cb *callbacks)
+{
+       ar->hif.ops->init(ar, callbacks);
+}
+
+static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar,
+                                                  u8 pipe_id)
+{
+       return ar->hif.ops->get_free_queue_number(ar, pipe_id);
+}
+
+#endif /* _HIF_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
new file mode 100644 (file)
index 0000000..74363c9
--- /dev/null
@@ -0,0 +1,1000 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "hif.h"
+#include "debug.h"
+
+/********/
+/* Send */
+/********/
+
+static inline void ath10k_htc_send_complete_check(struct ath10k_htc_ep *ep,
+                                                 int force)
+{
+       /*
+        * Check whether HIF has any prior sends that have finished but
+        * have not yet had their post-processing done.
+        */
+       ath10k_hif_send_complete_check(ep->htc->ar, ep->ul_pipe_id, force);
+}
+
+static void ath10k_htc_control_tx_complete(struct ath10k *ar,
+                                          struct sk_buff *skb)
+{
+       kfree_skb(skb);
+}
+
+static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
+{
+       struct sk_buff *skb;
+       struct ath10k_skb_cb *skb_cb;
+
+       skb = dev_alloc_skb(ATH10K_HTC_CONTROL_BUFFER_SIZE);
+       if (!skb) {
+               ath10k_warn("Unable to allocate ctrl skb\n");
+               return NULL;
+       }
+
+       skb_reserve(skb, 20); /* FIXME: why 20 bytes? */
+       WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
+
+       skb_cb = ATH10K_SKB_CB(skb);
+       memset(skb_cb, 0, sizeof(*skb_cb));
+
+       ath10k_dbg(ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb);
+       return skb;
+}
+
+static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
+                                            struct sk_buff *skb)
+{
+       ath10k_skb_unmap(htc->ar->dev, skb);
+       skb_pull(skb, sizeof(struct ath10k_htc_hdr));
+}
+
+static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
+                                           struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
+                  ep->eid, skb);
+
+       ath10k_htc_restore_tx_skb(ep->htc, skb);
+
+       if (!ep->ep_ops.ep_tx_complete) {
+               ath10k_warn("no tx handler for eid %d\n", ep->eid);
+               dev_kfree_skb_any(skb);
+               return;
+       }
+
+       ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
+}
+
+/* assumes tx_lock is held */
+static bool ath10k_htc_ep_need_credit_update(struct ath10k_htc_ep *ep)
+{
+       if (!ep->tx_credit_flow_enabled)
+               return false;
+       if (ep->tx_credits >= ep->tx_credits_per_max_message)
+               return false;
+
+       ath10k_dbg(ATH10K_DBG_HTC, "HTC: endpoint %d needs credit update\n",
+                  ep->eid);
+       return true;
+}
+
+static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
+                                     struct sk_buff *skb)
+{
+       struct ath10k_htc_hdr *hdr;
+
+       hdr = (struct ath10k_htc_hdr *)skb->data;
+       memset(hdr, 0, sizeof(*hdr));
+
+       hdr->eid = ep->eid;
+       hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
+
+       spin_lock_bh(&ep->htc->tx_lock);
+       hdr->seq_no = ep->seq_no++;
+
+       if (ath10k_htc_ep_need_credit_update(ep))
+               hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
+
+       spin_unlock_bh(&ep->htc->tx_lock);
+}
+
+static int ath10k_htc_issue_skb(struct ath10k_htc *htc,
+                               struct ath10k_htc_ep *ep,
+                               struct sk_buff *skb,
+                               u8 credits)
+{
+       struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+       int ret;
+
+       ath10k_dbg(ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
+                  ep->eid, skb);
+
+       ath10k_htc_prepare_tx_skb(ep, skb);
+
+       ret = ath10k_skb_map(htc->ar->dev, skb);
+       if (ret)
+               goto err;
+
+       ret = ath10k_hif_send_head(htc->ar,
+                                  ep->ul_pipe_id,
+                                  ep->eid,
+                                  skb->len,
+                                  skb);
+       if (unlikely(ret))
+               goto err;
+
+       return 0;
+err:
+       ath10k_warn("HTC issue failed: %d\n", ret);
+
+       spin_lock_bh(&htc->tx_lock);
+       ep->tx_credits += credits;
+       spin_unlock_bh(&htc->tx_lock);
+
+       /* this is the simplest way to handle out-of-resources for non-credit
+        * based endpoints. credit based endpoints can still get -ENOSR, but
+        * this is highly unlikely as credit reservation should prevent that */
+       if (ret == -ENOSR) {
+               spin_lock_bh(&htc->tx_lock);
+               __skb_queue_head(&ep->tx_queue, skb);
+               spin_unlock_bh(&htc->tx_lock);
+
+               return ret;
+       }
+
+       skb_cb->is_aborted = true;
+       ath10k_htc_notify_tx_completion(ep, skb);
+
+       return ret;
+}
+
+static struct sk_buff *ath10k_htc_get_skb_credit_based(struct ath10k_htc *htc,
+                                                      struct ath10k_htc_ep *ep,
+                                                      u8 *credits)
+{
+       struct sk_buff *skb;
+       struct ath10k_skb_cb *skb_cb;
+       int credits_required;
+       int remainder;
+       unsigned int transfer_len;
+
+       lockdep_assert_held(&htc->tx_lock);
+
+       skb = __skb_dequeue(&ep->tx_queue);
+       if (!skb)
+               return NULL;
+
+       skb_cb = ATH10K_SKB_CB(skb);
+       transfer_len = skb->len;
+
+       if (likely(transfer_len <= htc->target_credit_size)) {
+               credits_required = 1;
+       } else {
+               /* figure out how many credits this message requires */
+               credits_required = transfer_len / htc->target_credit_size;
+               remainder = transfer_len % htc->target_credit_size;
+
+               if (remainder)
+                       credits_required++;
+       }
+
+       ath10k_dbg(ATH10K_DBG_HTC, "Credits required %d got %d\n",
+                  credits_required, ep->tx_credits);
+
+       if (ep->tx_credits < credits_required) {
+               __skb_queue_head(&ep->tx_queue, skb);
+               return NULL;
+       }
+
+       ep->tx_credits -= credits_required;
+       *credits = credits_required;
+       return skb;
+}
+
+static void ath10k_htc_send_work(struct work_struct *work)
+{
+       struct ath10k_htc_ep *ep = container_of(work,
+                                       struct ath10k_htc_ep, send_work);
+       struct ath10k_htc *htc = ep->htc;
+       struct sk_buff *skb;
+       u8 credits = 0;
+       int ret;
+
+       while (true) {
+               if (ep->ul_is_polled)
+                       ath10k_htc_send_complete_check(ep, 0);
+
+               spin_lock_bh(&htc->tx_lock);
+               if (ep->tx_credit_flow_enabled)
+                       skb = ath10k_htc_get_skb_credit_based(htc, ep,
+                                                             &credits);
+               else
+                       skb = __skb_dequeue(&ep->tx_queue);
+               spin_unlock_bh(&htc->tx_lock);
+
+               if (!skb)
+                       break;
+
+               ret = ath10k_htc_issue_skb(htc, ep, skb, credits);
+               if (ret == -ENOSR)
+                       break;
+       }
+}
+
+int ath10k_htc_send(struct ath10k_htc *htc,
+                   enum ath10k_htc_ep_id eid,
+                   struct sk_buff *skb)
+{
+       struct ath10k_htc_ep *ep = &htc->endpoint[eid];
+
+       if (eid >= ATH10K_HTC_EP_COUNT) {
+               ath10k_warn("Invalid endpoint id: %d\n", eid);
+               return -ENOENT;
+       }
+
+       skb_push(skb, sizeof(struct ath10k_htc_hdr));
+
+       spin_lock_bh(&htc->tx_lock);
+       __skb_queue_tail(&ep->tx_queue, skb);
+       spin_unlock_bh(&htc->tx_lock);
+
+       queue_work(htc->ar->workqueue, &ep->send_work);
+       return 0;
+}
+
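+/*
+ * A minimal usage sketch for the send path above: a hypothetical caller
+ * allocates an skb with ath10k_htc_alloc_skb() (which reserves headroom for
+ * the HTC header), copies its payload in and hands the skb over with
+ * ath10k_htc_send(). The helper name, endpoint and payload are assumptions
+ * made only for illustration.
+ */
+static inline int example_htc_send_payload(struct ath10k_htc *htc,
+                                           enum ath10k_htc_ep_id eid,
+                                           const void *payload, int len)
+{
+       struct sk_buff *skb;
+       int ret;
+
+       skb = ath10k_htc_alloc_skb(len);
+       if (!skb)
+               return -ENOMEM;
+
+       memcpy(skb_put(skb, len), payload, len);
+
+       ret = ath10k_htc_send(htc, eid, skb);
+       if (ret)
+               /* on failure the skb is not consumed by ath10k_htc_send() */
+               kfree_skb(skb);
+
+       return ret;
+}
+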
+static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
+                                           struct sk_buff *skb,
+                                           unsigned int eid)
+{
+       struct ath10k_htc *htc = ar->htc;
+       struct ath10k_htc_ep *ep = &htc->endpoint[eid];
+       bool stopping;
+
+       ath10k_htc_notify_tx_completion(ep, skb);
+       /* the skb now belongs to the completion handler */
+
+       spin_lock_bh(&htc->tx_lock);
+       stopping = htc->stopping;
+       spin_unlock_bh(&htc->tx_lock);
+
+       if (!ep->tx_credit_flow_enabled && !stopping)
+               /*
+                * note: when using TX credit flow, the re-checking of
+                * queues happens when credits flow back from the target.
+                * in the non-TX credit case, we recheck after the packet
+                * completes
+                */
+               queue_work(ar->workqueue, &ep->send_work);
+
+       return 0;
+}
+
+/* flush endpoint TX queue */
+static void ath10k_htc_flush_endpoint_tx(struct ath10k_htc *htc,
+                                        struct ath10k_htc_ep *ep)
+{
+       struct sk_buff *skb;
+       struct ath10k_skb_cb *skb_cb;
+
+       spin_lock_bh(&htc->tx_lock);
+       for (;;) {
+               skb = __skb_dequeue(&ep->tx_queue);
+               if (!skb)
+                       break;
+
+               skb_cb = ATH10K_SKB_CB(skb);
+               skb_cb->is_aborted = true;
+               ath10k_htc_notify_tx_completion(ep, skb);
+       }
+       spin_unlock_bh(&htc->tx_lock);
+
+       cancel_work_sync(&ep->send_work);
+}
+
+/***********/
+/* Receive */
+/***********/
+
+static void
+ath10k_htc_process_credit_report(struct ath10k_htc *htc,
+                                const struct ath10k_htc_credit_report *report,
+                                int len,
+                                enum ath10k_htc_ep_id eid)
+{
+       struct ath10k_htc_ep *ep;
+       int i, n_reports;
+
+       if (len % sizeof(*report))
+               ath10k_warn("Uneven credit report len %d", len);
+
+       n_reports = len / sizeof(*report);
+
+       spin_lock_bh(&htc->tx_lock);
+       for (i = 0; i < n_reports; i++, report++) {
+               if (report->eid >= ATH10K_HTC_EP_COUNT)
+                       break;
+
+               ath10k_dbg(ATH10K_DBG_HTC, "ep %d got %d credits\n",
+                          report->eid, report->credits);
+
+               ep = &htc->endpoint[report->eid];
+               ep->tx_credits += report->credits;
+
+               if (ep->tx_credits && !skb_queue_empty(&ep->tx_queue))
+                       queue_work(htc->ar->workqueue, &ep->send_work);
+       }
+       spin_unlock_bh(&htc->tx_lock);
+}
+
+static int ath10k_htc_process_trailer(struct ath10k_htc *htc,
+                                     u8 *buffer,
+                                     int length,
+                                     enum ath10k_htc_ep_id src_eid)
+{
+       int status = 0;
+       struct ath10k_htc_record *record;
+       u8 *orig_buffer;
+       int orig_length;
+       size_t len;
+
+       orig_buffer = buffer;
+       orig_length = length;
+
+       while (length > 0) {
+               record = (struct ath10k_htc_record *)buffer;
+
+               if (length < sizeof(record->hdr)) {
+                       status = -EINVAL;
+                       break;
+               }
+
+               if (record->hdr.len > length) {
+                       /* no room left in buffer for record */
+                       ath10k_warn("Invalid record length: %d\n",
+                                   record->hdr.len);
+                       status = -EINVAL;
+                       break;
+               }
+
+               switch (record->hdr.id) {
+               case ATH10K_HTC_RECORD_CREDITS:
+                       len = sizeof(struct ath10k_htc_credit_report);
+                       if (record->hdr.len < len) {
+                               ath10k_warn("Credit report too short\n");
+                               status = -EINVAL;
+                               break;
+                       }
+                       ath10k_htc_process_credit_report(htc,
+                                                        record->credit_report,
+                                                        record->hdr.len,
+                                                        src_eid);
+                       break;
+               default:
+                       ath10k_warn("Unhandled record: id:%d length:%d\n",
+                                   record->hdr.id, record->hdr.len);
+                       break;
+               }
+
+               if (status)
+                       break;
+
+               /* multiple records may be present in a trailer */
+               buffer += sizeof(record->hdr) + record->hdr.len;
+               length -= sizeof(record->hdr) + record->hdr.len;
+       }
+
+       if (status)
+               ath10k_dbg_dump(ATH10K_DBG_HTC, "htc rx bad trailer", "",
+                               orig_buffer, orig_length);
+
+       return status;
+}
+
+static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
+                                           struct sk_buff *skb,
+                                           u8 pipe_id)
+{
+       int status = 0;
+       struct ath10k_htc *htc = ar->htc;
+       struct ath10k_htc_hdr *hdr;
+       struct ath10k_htc_ep *ep;
+       u16 payload_len;
+       u32 trailer_len = 0;
+       size_t min_len;
+       u8 eid;
+       bool trailer_present;
+
+       hdr = (struct ath10k_htc_hdr *)skb->data;
+       skb_pull(skb, sizeof(*hdr));
+
+       eid = hdr->eid;
+
+       if (eid >= ATH10K_HTC_EP_COUNT) {
+               ath10k_warn("HTC Rx: invalid eid %d\n", eid);
+               ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad header", "",
+                               hdr, sizeof(*hdr));
+               status = -EINVAL;
+               goto out;
+       }
+
+       ep = &htc->endpoint[eid];
+
+       /*
+        * If this endpoint that received a message from the target has
+        * a to-target HIF pipe whose send completions are polled rather
+        * than interrupt-driven, this is a good point to ask HIF to check
+        * whether it has any completed sends to handle.
+        */
+       if (ep->ul_is_polled)
+               ath10k_htc_send_complete_check(ep, 1);
+
+       payload_len = __le16_to_cpu(hdr->len);
+
+       if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) {
+               ath10k_warn("HTC rx frame too long, len: %zu\n",
+                           payload_len + sizeof(*hdr));
+               ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad rx pkt len", "",
+                               hdr, sizeof(*hdr));
+               status = -EINVAL;
+               goto out;
+       }
+
+       if (skb->len < payload_len) {
+               ath10k_dbg(ATH10K_DBG_HTC,
+                          "HTC Rx: insufficient length, got %d, expected %d\n",
+                          skb->len, payload_len);
+               ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad rx pkt len",
+                               "", hdr, sizeof(*hdr));
+               status = -EINVAL;
+               goto out;
+       }
+
+       /* get flags to check for trailer */
+       trailer_present = hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
+       if (trailer_present) {
+               u8 *trailer;
+
+               trailer_len = hdr->trailer_len;
+               min_len = sizeof(struct ath10k_ath10k_htc_record_hdr);
+
+               if ((trailer_len < min_len) ||
+                   (trailer_len > payload_len)) {
+                       ath10k_warn("Invalid trailer length: %d\n",
+                                   trailer_len);
+                       status = -EPROTO;
+                       goto out;
+               }
+
+               trailer = (u8 *)hdr;
+               trailer += sizeof(*hdr);
+               trailer += payload_len;
+               trailer -= trailer_len;
+               status = ath10k_htc_process_trailer(htc, trailer,
+                                                   trailer_len, hdr->eid);
+               if (status)
+                       goto out;
+
+               skb_trim(skb, skb->len - trailer_len);
+       }
+
+       if (((int)payload_len - (int)trailer_len) <= 0)
+               /* zero length packet with trailer data, just drop these */
+               goto out;
+
+       if (eid == ATH10K_HTC_EP_0) {
+               struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;
+
+               switch (__le16_to_cpu(msg->hdr.message_id)) {
+               default:
+                       /* handle HTC control message */
+                       if (completion_done(&htc->ctl_resp)) {
+                               /*
+                                * this is a fatal error, target should not be
+                                * sending unsolicited messages on the ep 0
+                                */
+                               ath10k_warn("HTC rx ctrl still processing\n");
+                               status = -EINVAL;
+                               complete(&htc->ctl_resp);
+                               goto out;
+                       }
+
+                       htc->control_resp_len =
+                               min_t(int, skb->len,
+                                     ATH10K_HTC_MAX_CTRL_MSG_LEN);
+
+                       memcpy(htc->control_resp_buffer, skb->data,
+                              htc->control_resp_len);
+
+                       complete(&htc->ctl_resp);
+                       break;
+               case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE:
+                       htc->htc_ops.target_send_suspend_complete(ar);
+               }
+               goto out;
+       }
+
+       ath10k_dbg(ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n",
+                  eid, skb);
+       ep->ep_ops.ep_rx_complete(ar, skb);
+
+       /* skb is now owned by the rx completion handler */
+       skb = NULL;
+out:
+       kfree_skb(skb);
+
+       return status;
+}
+
+static void ath10k_htc_control_rx_complete(struct ath10k *ar,
+                                          struct sk_buff *skb)
+{
+       /* This is unexpected. FW is not supposed to send regular rx on this
+        * endpoint. */
+       ath10k_warn("unexpected htc rx\n");
+       kfree_skb(skb);
+}
+
+/***************/
+/* Init/Deinit */
+/***************/
+
+static const char *htc_service_name(enum ath10k_htc_svc_id id)
+{
+       switch (id) {
+       case ATH10K_HTC_SVC_ID_RESERVED:
+               return "Reserved";
+       case ATH10K_HTC_SVC_ID_RSVD_CTRL:
+               return "Control";
+       case ATH10K_HTC_SVC_ID_WMI_CONTROL:
+               return "WMI";
+       case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
+               return "DATA BE";
+       case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
+               return "DATA BK";
+       case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
+               return "DATA VI";
+       case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
+               return "DATA VO";
+       case ATH10K_HTC_SVC_ID_NMI_CONTROL:
+               return "NMI Control";
+       case ATH10K_HTC_SVC_ID_NMI_DATA:
+               return "NMI Data";
+       case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
+               return "HTT Data";
+       case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
+               return "RAW";
+       }
+
+       return "Unknown";
+}
+
+static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc)
+{
+       struct ath10k_htc_ep *ep;
+       int i;
+
+       for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
+               ep = &htc->endpoint[i];
+               ep->service_id = ATH10K_HTC_SVC_ID_UNUSED;
+               ep->max_ep_message_len = 0;
+               ep->max_tx_queue_depth = 0;
+               ep->eid = i;
+               skb_queue_head_init(&ep->tx_queue);
+               ep->htc = htc;
+               ep->tx_credit_flow_enabled = true;
+               INIT_WORK(&ep->send_work, ath10k_htc_send_work);
+       }
+}
+
+static void ath10k_htc_setup_target_buffer_assignments(struct ath10k_htc *htc)
+{
+       struct ath10k_htc_svc_tx_credits *entry;
+
+       entry = &htc->service_tx_alloc[0];
+
+       /*
+        * for PCIE allocate all credits/HTC buffers to WMI.
+        * no buffers are used/required for data. data always
+        * remains on host.
+        */
+       entry++;
+       entry->service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
+       entry->credit_allocation = htc->total_transmit_credits;
+}
+
+static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc,
+                                          u16 service_id)
+{
+       u8 allocation = 0;
+       int i;
+
+       for (i = 0; i < ATH10K_HTC_EP_COUNT; i++) {
+               if (htc->service_tx_alloc[i].service_id == service_id)
+                       allocation =
+                           htc->service_tx_alloc[i].credit_allocation;
+       }
+
+       return allocation;
+}
+
+int ath10k_htc_wait_target(struct ath10k_htc *htc)
+{
+       int status = 0;
+       struct ath10k_htc_svc_conn_req conn_req;
+       struct ath10k_htc_svc_conn_resp conn_resp;
+       struct ath10k_htc_msg *msg;
+       u16 message_id;
+       u16 credit_count;
+       u16 credit_size;
+
+       INIT_COMPLETION(htc->ctl_resp);
+
+       status = ath10k_hif_start(htc->ar);
+       if (status) {
+               ath10k_err("could not start HIF (%d)\n", status);
+               goto err_start;
+       }
+
+       status = wait_for_completion_timeout(&htc->ctl_resp,
+                                            ATH10K_HTC_WAIT_TIMEOUT_HZ);
+       if (status <= 0) {
+               if (status == 0)
+                       status = -ETIMEDOUT;
+
+               ath10k_err("ctl_resp never came in (%d)\n", status);
+               goto err_target;
+       }
+
+       if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) {
+               ath10k_err("Invalid HTC ready msg len:%d\n",
+                          htc->control_resp_len);
+
+               status = -ECOMM;
+               goto err_target;
+       }
+
+       msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
+       message_id   = __le16_to_cpu(msg->hdr.message_id);
+       credit_count = __le16_to_cpu(msg->ready.credit_count);
+       credit_size  = __le16_to_cpu(msg->ready.credit_size);
+
+       if (message_id != ATH10K_HTC_MSG_READY_ID) {
+               ath10k_err("Invalid HTC ready msg: 0x%x\n", message_id);
+               status = -ECOMM;
+               goto err_target;
+       }
+
+       htc->total_transmit_credits = credit_count;
+       htc->target_credit_size = credit_size;
+
+       ath10k_dbg(ATH10K_DBG_HTC,
+                  "Target ready! transmit resources: %d size:%d\n",
+                  htc->total_transmit_credits,
+                  htc->target_credit_size);
+
+       if ((htc->total_transmit_credits == 0) ||
+           (htc->target_credit_size == 0)) {
+               status = -ECOMM;
+               ath10k_err("Invalid credit size received\n");
+               goto err_target;
+       }
+
+       ath10k_htc_setup_target_buffer_assignments(htc);
+
+       /* setup our pseudo HTC control endpoint connection */
+       memset(&conn_req, 0, sizeof(conn_req));
+       memset(&conn_resp, 0, sizeof(conn_resp));
+       conn_req.ep_ops.ep_tx_complete = ath10k_htc_control_tx_complete;
+       conn_req.ep_ops.ep_rx_complete = ath10k_htc_control_rx_complete;
+       conn_req.max_send_queue_depth = ATH10K_NUM_CONTROL_TX_BUFFERS;
+       conn_req.service_id = ATH10K_HTC_SVC_ID_RSVD_CTRL;
+
+       /* connect fake service */
+       status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp);
+       if (status) {
+               ath10k_err("could not connect to htc service (%d)\n", status);
+               goto err_target;
+       }
+
+       return 0;
+err_target:
+       ath10k_hif_stop(htc->ar);
+err_start:
+       return status;
+}
+
+int ath10k_htc_connect_service(struct ath10k_htc *htc,
+                              struct ath10k_htc_svc_conn_req *conn_req,
+                              struct ath10k_htc_svc_conn_resp *conn_resp)
+{
+       struct ath10k_htc_msg *msg;
+       struct ath10k_htc_conn_svc *req_msg;
+       struct ath10k_htc_conn_svc_response resp_msg_dummy;
+       struct ath10k_htc_conn_svc_response *resp_msg = &resp_msg_dummy;
+       enum ath10k_htc_ep_id assigned_eid = ATH10K_HTC_EP_COUNT;
+       struct ath10k_htc_ep *ep;
+       struct sk_buff *skb;
+       unsigned int max_msg_size = 0;
+       int length, status;
+       bool disable_credit_flow_ctrl = false;
+       u16 message_id, service_id, flags = 0;
+       u8 tx_alloc = 0;
+
+       /* special case for HTC pseudo control service */
+       if (conn_req->service_id == ATH10K_HTC_SVC_ID_RSVD_CTRL) {
+               disable_credit_flow_ctrl = true;
+               assigned_eid = ATH10K_HTC_EP_0;
+               max_msg_size = ATH10K_HTC_MAX_CTRL_MSG_LEN;
+               memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
+               goto setup;
+       }
+
+       tx_alloc = ath10k_htc_get_credit_allocation(htc,
+                                                   conn_req->service_id);
+       if (!tx_alloc)
+               ath10k_warn("HTC Service %s does not allocate target credits\n",
+                           htc_service_name(conn_req->service_id));
+
+       skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
+       if (!skb) {
+               ath10k_err("Failed to allocate HTC packet\n");
+               return -ENOMEM;
+       }
+
+       length = sizeof(msg->hdr) + sizeof(msg->connect_service);
+       skb_put(skb, length);
+       memset(skb->data, 0, length);
+
+       msg = (struct ath10k_htc_msg *)skb->data;
+       msg->hdr.message_id =
+               __cpu_to_le16(ATH10K_HTC_MSG_CONNECT_SERVICE_ID);
+
+       flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC);
+
+       /* Only enable credit flow control for WMI ctrl service */
+       if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {
+               flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
+               disable_credit_flow_ctrl = true;
+       }
+
+       /* flags must be final before they are copied into the request */
+       req_msg = &msg->connect_service;
+       req_msg->flags = __cpu_to_le16(flags);
+       req_msg->service_id = __cpu_to_le16(conn_req->service_id);
+
+       INIT_COMPLETION(htc->ctl_resp);
+
+       status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
+       if (status) {
+               kfree_skb(skb);
+               return status;
+       }
+
+       /* wait for response */
+       status = wait_for_completion_timeout(&htc->ctl_resp,
+                                            ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
+       if (status <= 0) {
+               if (status == 0)
+                       status = -ETIMEDOUT;
+               ath10k_err("Service connect timeout: %d\n", status);
+               return status;
+       }
+
+       /* we controlled the buffer creation, it's aligned */
+       msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
+       resp_msg = &msg->connect_service_response;
+       message_id = __le16_to_cpu(msg->hdr.message_id);
+       service_id = __le16_to_cpu(resp_msg->service_id);
+
+       if ((message_id != ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
+           (htc->control_resp_len < sizeof(msg->hdr) +
+            sizeof(msg->connect_service_response))) {
+               ath10k_err("Invalid resp message ID 0x%x", message_id);
+               return -EPROTO;
+       }
+
+       ath10k_dbg(ATH10K_DBG_HTC,
+                  "HTC Service %s connect response: status: 0x%x, assigned ep: 0x%x\n",
+                  htc_service_name(service_id),
+                  resp_msg->status, resp_msg->eid);
+
+       conn_resp->connect_resp_code = resp_msg->status;
+
+       /* check response status */
+       if (resp_msg->status != ATH10K_HTC_CONN_SVC_STATUS_SUCCESS) {
+               ath10k_err("HTC Service %s connect request failed: 0x%x\n",
+                          htc_service_name(service_id),
+                          resp_msg->status);
+               return -EPROTO;
+       }
+
+       assigned_eid = (enum ath10k_htc_ep_id)resp_msg->eid;
+       max_msg_size = __le16_to_cpu(resp_msg->max_msg_size);
+
+setup:
+
+       if (assigned_eid >= ATH10K_HTC_EP_COUNT)
+               return -EPROTO;
+
+       if (max_msg_size == 0)
+               return -EPROTO;
+
+       ep = &htc->endpoint[assigned_eid];
+       ep->eid = assigned_eid;
+
+       if (ep->service_id != ATH10K_HTC_SVC_ID_UNUSED)
+               return -EPROTO;
+
+       /* return assigned endpoint to caller */
+       conn_resp->eid = assigned_eid;
+       conn_resp->max_msg_len = __le16_to_cpu(resp_msg->max_msg_size);
+
+       /* setup the endpoint */
+       ep->service_id = conn_req->service_id;
+       ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
+       ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size);
+       ep->tx_credits = tx_alloc;
+       ep->tx_credit_size = htc->target_credit_size;
+       ep->tx_credits_per_max_message = ep->max_ep_message_len /
+                                        htc->target_credit_size;
+
+       if (ep->max_ep_message_len % htc->target_credit_size)
+               ep->tx_credits_per_max_message++;
+
+       /* copy all the callbacks */
+       ep->ep_ops = conn_req->ep_ops;
+
+       status = ath10k_hif_map_service_to_pipe(htc->ar,
+                                               ep->service_id,
+                                               &ep->ul_pipe_id,
+                                               &ep->dl_pipe_id,
+                                               &ep->ul_is_polled,
+                                               &ep->dl_is_polled);
+       if (status)
+               return status;
+
+       ath10k_dbg(ATH10K_DBG_HTC,
+                  "HTC service: %s UL pipe: %d DL pipe: %d eid: %d ready\n",
+                  htc_service_name(ep->service_id), ep->ul_pipe_id,
+                  ep->dl_pipe_id, ep->eid);
+
+       ath10k_dbg(ATH10K_DBG_HTC,
+                  "EP %d UL polled: %d, DL polled: %d\n",
+                  ep->eid, ep->ul_is_polled, ep->dl_is_polled);
+
+       if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
+               ep->tx_credit_flow_enabled = false;
+               ath10k_dbg(ATH10K_DBG_HTC,
+                          "HTC service: %s eid: %d TX flow control disabled\n",
+                          htc_service_name(ep->service_id), assigned_eid);
+       }
+
+       return status;
+}
+
+struct sk_buff *ath10k_htc_alloc_skb(int size)
+{
+       struct sk_buff *skb;
+
+       skb = dev_alloc_skb(size + sizeof(struct ath10k_htc_hdr));
+       if (!skb) {
+               ath10k_warn("could not allocate HTC tx skb\n");
+               return NULL;
+       }
+
+       skb_reserve(skb, sizeof(struct ath10k_htc_hdr));
+
+       /* FW/HTC requires 4-byte aligned streams */
+       if (!IS_ALIGNED((unsigned long)skb->data, 4))
+               ath10k_warn("Unaligned HTC tx skb\n");
+
+       return skb;
+}
+
+int ath10k_htc_start(struct ath10k_htc *htc)
+{
+       struct sk_buff *skb;
+       int status = 0;
+       struct ath10k_htc_msg *msg;
+
+       skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
+       if (!skb)
+               return -ENOMEM;
+
+       skb_put(skb, sizeof(msg->hdr) + sizeof(msg->setup_complete_ext));
+       memset(skb->data, 0, skb->len);
+
+       msg = (struct ath10k_htc_msg *)skb->data;
+       msg->hdr.message_id =
+               __cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID);
+
+       ath10k_dbg(ATH10K_DBG_HTC, "HTC is using TX credit flow control\n");
+
+       status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
+       if (status) {
+               kfree_skb(skb);
+               return status;
+       }
+
+       return 0;
+}
+
+/*
+ * stop HTC communications, i.e. stop interrupt reception, and flush all
+ * queued buffers
+ */
+void ath10k_htc_stop(struct ath10k_htc *htc)
+{
+       int i;
+       struct ath10k_htc_ep *ep;
+
+       spin_lock_bh(&htc->tx_lock);
+       htc->stopping = true;
+       spin_unlock_bh(&htc->tx_lock);
+
+       for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
+               ep = &htc->endpoint[i];
+               ath10k_htc_flush_endpoint_tx(htc, ep);
+       }
+
+       ath10k_hif_stop(htc->ar);
+       ath10k_htc_reset_endpoint_states(htc);
+}
+
+/* registered target arrival callback from the HIF layer */
+struct ath10k_htc *ath10k_htc_create(struct ath10k *ar,
+                                    struct ath10k_htc_ops *htc_ops)
+{
+       struct ath10k_hif_cb htc_callbacks;
+       struct ath10k_htc_ep *ep = NULL;
+       struct ath10k_htc *htc = NULL;
+
+       /* FIXME: use struct ath10k instead */
+       htc = kzalloc(sizeof(struct ath10k_htc), GFP_KERNEL);
+       if (!htc)
+               return ERR_PTR(-ENOMEM);
+
+       spin_lock_init(&htc->tx_lock);
+
+       memcpy(&htc->htc_ops, htc_ops, sizeof(struct ath10k_htc_ops));
+
+       ath10k_htc_reset_endpoint_states(htc);
+
+       /* setup HIF layer callbacks */
+       htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
+       htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
+       htc->ar = ar;
+
+       /* Get HIF default pipe for HTC message exchange */
+       ep = &htc->endpoint[ATH10K_HTC_EP_0];
+
+       ath10k_hif_init(ar, &htc_callbacks);
+       ath10k_hif_get_default_pipe(ar, &ep->ul_pipe_id, &ep->dl_pipe_id);
+
+       init_completion(&htc->ctl_resp);
+
+       return htc;
+}
+
+void ath10k_htc_destroy(struct ath10k_htc *htc)
+{
+       kfree(htc);
+}
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
new file mode 100644 (file)
index 0000000..fa45844
--- /dev/null
@@ -0,0 +1,368 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _HTC_H_
+#define _HTC_H_
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/bug.h>
+#include <linux/skbuff.h>
+#include <linux/semaphore.h>
+#include <linux/timer.h>
+
+struct ath10k;
+
+/****************/
+/* HTC protocol */
+/****************/
+
+/*
+ * HTC - host-target control protocol
+ *
+ * tx packets are generally <htc_hdr><payload>
+ * rx packets are more complex: <htc_hdr><payload><trailer>
+ *
+ * The payload + trailer length is stored in len.
+ * To get the payload-only length, subtract trailer_len from len.
+ *
+ * Trailer contains (possibly) multiple <htc_record>.
+ * Each record is an id-len-value tuple.
+ *
+ * HTC header flags, control_byte0, control_byte1
+ * have different meanings depending on whether the
+ * frame is tx or rx.
+ *
+ * Alignment: htc_hdr, payload and trailer are
+ * 4-byte aligned.
+ */
+
+enum ath10k_htc_tx_flags {
+       ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01,
+       ATH10K_HTC_FLAG_SEND_BUNDLE        = 0x02
+};
+
+enum ath10k_htc_rx_flags {
+       ATH10K_HTC_FLAG_TRAILER_PRESENT = 0x02,
+       ATH10K_HTC_FLAG_BUNDLE_MASK     = 0xF0
+};
+
+struct ath10k_htc_hdr {
+       u8 eid; /* @enum ath10k_htc_ep_id */
+       u8 flags; /* @enum ath10k_htc_tx_flags, ath10k_htc_rx_flags */
+       __le16 len;
+       union {
+               u8 trailer_len; /* for rx */
+               u8 control_byte0;
+       } __packed;
+       union {
+               u8 seq_no; /* for tx */
+               u8 control_byte1;
+       } __packed;
+       u8 pad0;
+       u8 pad1;
+} __packed __aligned(4);
+
+enum ath10k_ath10k_htc_msg_id {
+       ATH10K_HTC_MSG_READY_ID                = 1,
+       ATH10K_HTC_MSG_CONNECT_SERVICE_ID      = 2,
+       ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID = 3,
+       ATH10K_HTC_MSG_SETUP_COMPLETE_ID       = 4,
+       ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID    = 5,
+       ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE   = 6
+};
+
+enum ath10k_htc_version {
+       ATH10K_HTC_VERSION_2P0 = 0x00, /* 2.0 */
+       ATH10K_HTC_VERSION_2P1 = 0x01, /* 2.1 */
+};
+
+enum ath10k_htc_conn_flags {
+       ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_FOURTH    = 0x0,
+       ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_HALF      = 0x1,
+       ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_THREE_FOURTHS = 0x2,
+       ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_UNITY         = 0x3,
+#define ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_MASK 0x3
+       ATH10K_HTC_CONN_FLAGS_REDUCE_CREDIT_DRIBBLE    = 1 << 2,
+       ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL = 1 << 3
+#define ATH10K_HTC_CONN_FLAGS_RECV_ALLOC_MASK 0xFF00
+#define ATH10K_HTC_CONN_FLAGS_RECV_ALLOC_LSB  8
+};
+
+enum ath10k_htc_conn_svc_status {
+       ATH10K_HTC_CONN_SVC_STATUS_SUCCESS      = 0,
+       ATH10K_HTC_CONN_SVC_STATUS_NOT_FOUND    = 1,
+       ATH10K_HTC_CONN_SVC_STATUS_FAILED       = 2,
+       ATH10K_HTC_CONN_SVC_STATUS_NO_RESOURCES = 3,
+       ATH10K_HTC_CONN_SVC_STATUS_NO_MORE_EP   = 4
+};
+
+struct ath10k_ath10k_htc_msg_hdr {
+       __le16 message_id; /* @enum ath10k_ath10k_htc_msg_id */
+} __packed;
+
+struct ath10k_htc_unknown {
+       u8 pad0;
+       u8 pad1;
+} __packed;
+
+struct ath10k_htc_ready {
+       __le16 credit_count;
+       __le16 credit_size;
+       u8 max_endpoints;
+       u8 pad0;
+} __packed;
+
+struct ath10k_htc_ready_extended {
+       struct ath10k_htc_ready base;
+       u8 htc_version; /* @enum ath10k_htc_version */
+       u8 max_msgs_per_htc_bundle;
+       u8 pad0;
+       u8 pad1;
+} __packed;
+
+struct ath10k_htc_conn_svc {
+       __le16 service_id;
+       __le16 flags; /* @enum ath10k_htc_conn_flags */
+       u8 pad0;
+       u8 pad1;
+} __packed;
+
+struct ath10k_htc_conn_svc_response {
+       __le16 service_id;
+       u8 status; /* @enum ath10k_htc_conn_svc_status */
+       u8 eid;
+       __le16 max_msg_size;
+} __packed;
+
+struct ath10k_htc_setup_complete_extended {
+       u8 pad0;
+       u8 pad1;
+       __le32 flags; /* @enum htc_setup_complete_flags */
+       u8 max_msgs_per_bundled_recv;
+       u8 pad2;
+       u8 pad3;
+       u8 pad4;
+} __packed;
+
+struct ath10k_htc_msg {
+       struct ath10k_ath10k_htc_msg_hdr hdr;
+       union {
+               /* host-to-target */
+               struct ath10k_htc_conn_svc connect_service;
+               struct ath10k_htc_ready ready;
+               struct ath10k_htc_ready_extended ready_ext;
+               struct ath10k_htc_unknown unknown;
+               struct ath10k_htc_setup_complete_extended setup_complete_ext;
+
+               /* target-to-host */
+               struct ath10k_htc_conn_svc_response connect_service_response;
+       };
+} __packed __aligned(4);
+
+enum ath10k_ath10k_htc_record_id {
+       ATH10K_HTC_RECORD_NULL    = 0,
+       ATH10K_HTC_RECORD_CREDITS = 1
+};
+
+struct ath10k_ath10k_htc_record_hdr {
+       u8 id; /* @enum ath10k_ath10k_htc_record_id */
+       u8 len;
+       u8 pad0;
+       u8 pad1;
+} __packed;
+
+struct ath10k_htc_credit_report {
+       u8 eid; /* @enum ath10k_htc_ep_id */
+       u8 credits;
+       u8 pad0;
+       u8 pad1;
+} __packed;
+
+struct ath10k_htc_record {
+       struct ath10k_ath10k_htc_record_hdr hdr;
+       union {
+               struct ath10k_htc_credit_report credit_report[0];
+               u8 payload[0];
+       };
+} __packed __aligned(4);
+
+/*
+ * note: the trailer offset is dynamic depending
+ * on payload length. this is only a struct layout draft
+ */
+struct ath10k_htc_frame {
+       struct ath10k_htc_hdr hdr;
+       union {
+               struct ath10k_htc_msg msg;
+               u8 payload[0];
+       };
+       struct ath10k_htc_record trailer[0];
+} __packed __aligned(4);
+
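+/*
+ * Illustrative sketch of the dynamic trailer offset mentioned above: for an
+ * rx frame with ATH10K_HTC_FLAG_TRAILER_PRESENT set, hdr->len covers payload
+ * plus trailer, so the trailer begins trailer_len bytes before the end of
+ * the frame. The helper is an assumption added only for illustration; the
+ * driver performs the same walk in its rx completion handler.
+ */
+static inline struct ath10k_htc_record *
+example_htc_frame_trailer(struct ath10k_htc_hdr *hdr)
+{
+       u8 *trailer = (u8 *)hdr + sizeof(*hdr) + __le16_to_cpu(hdr->len);
+
+       trailer -= hdr->trailer_len;
+       return (struct ath10k_htc_record *)trailer;
+}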
+
+/*******************/
+/* Host-side stuff */
+/*******************/
+
+enum ath10k_htc_svc_gid {
+       ATH10K_HTC_SVC_GRP_RSVD = 0,
+       ATH10K_HTC_SVC_GRP_WMI = 1,
+       ATH10K_HTC_SVC_GRP_NMI = 2,
+       ATH10K_HTC_SVC_GRP_HTT = 3,
+
+       ATH10K_HTC_SVC_GRP_TEST = 254,
+       ATH10K_HTC_SVC_GRP_LAST = 255,
+};
+
+#define SVC(group, idx) \
+       (int)(((int)(group) << 8) | (int)(idx))
+
+enum ath10k_htc_svc_id {
+       /* NOTE: service ID of 0x0000 is reserved and should never be used */
+       ATH10K_HTC_SVC_ID_RESERVED      = 0x0000,
+       ATH10K_HTC_SVC_ID_UNUSED        = ATH10K_HTC_SVC_ID_RESERVED,
+
+       ATH10K_HTC_SVC_ID_RSVD_CTRL     = SVC(ATH10K_HTC_SVC_GRP_RSVD, 1),
+       ATH10K_HTC_SVC_ID_WMI_CONTROL   = SVC(ATH10K_HTC_SVC_GRP_WMI, 0),
+       ATH10K_HTC_SVC_ID_WMI_DATA_BE   = SVC(ATH10K_HTC_SVC_GRP_WMI, 1),
+       ATH10K_HTC_SVC_ID_WMI_DATA_BK   = SVC(ATH10K_HTC_SVC_GRP_WMI, 2),
+       ATH10K_HTC_SVC_ID_WMI_DATA_VI   = SVC(ATH10K_HTC_SVC_GRP_WMI, 3),
+       ATH10K_HTC_SVC_ID_WMI_DATA_VO   = SVC(ATH10K_HTC_SVC_GRP_WMI, 4),
+
+       ATH10K_HTC_SVC_ID_NMI_CONTROL   = SVC(ATH10K_HTC_SVC_GRP_NMI, 0),
+       ATH10K_HTC_SVC_ID_NMI_DATA      = SVC(ATH10K_HTC_SVC_GRP_NMI, 1),
+
+       ATH10K_HTC_SVC_ID_HTT_DATA_MSG  = SVC(ATH10K_HTC_SVC_GRP_HTT, 0),
+
+       /* raw stream service (i.e. flash, tcmd, calibration apps) */
+       ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS = SVC(ATH10K_HTC_SVC_GRP_TEST, 0),
+};
+
+#undef SVC
+
+enum ath10k_htc_ep_id {
+       ATH10K_HTC_EP_UNUSED = -1,
+       ATH10K_HTC_EP_0 = 0,
+       ATH10K_HTC_EP_1 = 1,
+       ATH10K_HTC_EP_2,
+       ATH10K_HTC_EP_3,
+       ATH10K_HTC_EP_4,
+       ATH10K_HTC_EP_5,
+       ATH10K_HTC_EP_6,
+       ATH10K_HTC_EP_7,
+       ATH10K_HTC_EP_8,
+       ATH10K_HTC_EP_COUNT,
+};
+
+struct ath10k_htc_ops {
+       void (*target_send_suspend_complete)(struct ath10k *ar);
+};
+
+struct ath10k_htc_ep_ops {
+       void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
+       void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
+};
+
+/* service connection information */
+struct ath10k_htc_svc_conn_req {
+       u16 service_id;
+       struct ath10k_htc_ep_ops ep_ops;
+       int max_send_queue_depth;
+};
+
+/* service connection response information */
+struct ath10k_htc_svc_conn_resp {
+       u8 buffer_len;
+       u8 actual_len;
+       enum ath10k_htc_ep_id eid;
+       unsigned int max_msg_len;
+       u8 connect_resp_code;
+};
+
+#define ATH10K_NUM_CONTROL_TX_BUFFERS 2
+#define ATH10K_HTC_MAX_LEN 4096
+#define ATH10K_HTC_MAX_CTRL_MSG_LEN 256
+#define ATH10K_HTC_WAIT_TIMEOUT_HZ (1*HZ)
+#define ATH10K_HTC_CONTROL_BUFFER_SIZE (ATH10K_HTC_MAX_CTRL_MSG_LEN + \
+                                       sizeof(struct ath10k_htc_hdr))
+#define ATH10K_HTC_CONN_SVC_TIMEOUT_HZ (1*HZ)
+
+struct ath10k_htc_ep {
+       struct ath10k_htc *htc;
+       enum ath10k_htc_ep_id eid;
+       enum ath10k_htc_svc_id service_id;
+       struct ath10k_htc_ep_ops ep_ops;
+
+       int max_tx_queue_depth;
+       int max_ep_message_len;
+       u8 ul_pipe_id;
+       u8 dl_pipe_id;
+       int ul_is_polled; /* call HIF to get tx completions */
+       int dl_is_polled; /* call HIF to fetch rx (not implemented) */
+
+       struct sk_buff_head tx_queue;
+
+       u8 seq_no; /* for debugging */
+       int tx_credits;
+       int tx_credit_size;
+       int tx_credits_per_max_message;
+       bool tx_credit_flow_enabled;
+
+       struct work_struct send_work;
+};
+
+struct ath10k_htc_svc_tx_credits {
+       u16 service_id;
+       u8  credit_allocation;
+};
+
+struct ath10k_htc {
+       struct ath10k *ar;
+       struct ath10k_htc_ep endpoint[ATH10K_HTC_EP_COUNT];
+
+       /* protects endpoint and stopping fields */
+       spinlock_t tx_lock;
+
+       struct ath10k_htc_ops htc_ops;
+
+       u8 control_resp_buffer[ATH10K_HTC_MAX_CTRL_MSG_LEN];
+       int control_resp_len;
+
+       struct completion ctl_resp;
+
+       int total_transmit_credits;
+       struct ath10k_htc_svc_tx_credits service_tx_alloc[ATH10K_HTC_EP_COUNT];
+       int target_credit_size;
+
+       bool stopping;
+};
+
+struct ath10k_htc *ath10k_htc_create(struct ath10k *ar,
+                                    struct ath10k_htc_ops *htc_ops);
+int ath10k_htc_wait_target(struct ath10k_htc *htc);
+int ath10k_htc_start(struct ath10k_htc *htc);
+int ath10k_htc_connect_service(struct ath10k_htc *htc,
+                              struct ath10k_htc_svc_conn_req  *conn_req,
+                              struct ath10k_htc_svc_conn_resp *conn_resp);
+int ath10k_htc_send(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid,
+                   struct sk_buff *packet);
+void ath10k_htc_stop(struct ath10k_htc *htc);
+void ath10k_htc_destroy(struct ath10k_htc *htc);
+struct sk_buff *ath10k_htc_alloc_skb(int size);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
new file mode 100644 (file)
index 0000000..185a546
--- /dev/null
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/slab.h>
+
+#include "htt.h"
+#include "core.h"
+#include "debug.h"
+
+static int ath10k_htt_htc_attach(struct ath10k_htt *htt)
+{
+       struct ath10k_htc_svc_conn_req conn_req;
+       struct ath10k_htc_svc_conn_resp conn_resp;
+       int status;
+
+       memset(&conn_req, 0, sizeof(conn_req));
+       memset(&conn_resp, 0, sizeof(conn_resp));
+
+       conn_req.ep_ops.ep_tx_complete = ath10k_htt_htc_tx_complete;
+       conn_req.ep_ops.ep_rx_complete = ath10k_htt_t2h_msg_handler;
+
+       /* connect to HTT data service */
+       conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_DATA_MSG;
+
+       status = ath10k_htc_connect_service(htt->ar->htc, &conn_req,
+                                           &conn_resp);
+
+       if (status)
+               return status;
+
+       htt->eid = conn_resp.eid;
+
+       return 0;
+}
+
+struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar)
+{
+       struct ath10k_htt *htt;
+       int ret;
+
+       htt = kzalloc(sizeof(*htt), GFP_KERNEL);
+       if (!htt)
+               return NULL;
+
+       htt->ar = ar;
+       htt->max_throughput_mbps = 800;
+
+       /*
+        * Connect to HTC service.
+        * This has to be done before calling ath10k_htt_rx_attach,
+        * since ath10k_htt_rx_attach involves sending a rx ring configure
+        * message to the target.
+        */
+       if (ath10k_htt_htc_attach(htt))
+               goto err_htc_attach;
+
+       ret = ath10k_htt_tx_attach(htt);
+       if (ret) {
+               ath10k_err("could not attach htt tx (%d)\n", ret);
+               goto err_htc_attach;
+       }
+
+       if (ath10k_htt_rx_attach(htt))
+               goto err_rx_attach;
+
+       /*
+        * Prefetch enough data to satisfy the target
+        * classification engine.
+        * This is for LL chips. HL chips will probably
+        * transfer the whole frame in the tx fragment.
+        */
+       htt->prefetch_len =
+               36 + /* 802.11 + qos + ht */
+               4 + /* 802.1q */
+               8 + /* llc snap */
+               2; /* ip4 dscp or ip6 priority */
+
+       return htt;
+
+err_rx_attach:
+       ath10k_htt_tx_detach(htt);
+err_htc_attach:
+       kfree(htt);
+       return NULL;
+}
+
+#define HTT_TARGET_VERSION_TIMEOUT_HZ (3*HZ)
+
+static int ath10k_htt_verify_version(struct ath10k_htt *htt)
+{
+       ath10k_dbg(ATH10K_DBG_HTT,
+                  "htt target version %d.%d; host version %d.%d\n",
+                   htt->target_version_major,
+                   htt->target_version_minor,
+                   HTT_CURRENT_VERSION_MAJOR,
+                   HTT_CURRENT_VERSION_MINOR);
+
+       if (htt->target_version_major != HTT_CURRENT_VERSION_MAJOR) {
+               ath10k_err("htt major versions are incompatible!\n");
+               return -ENOTSUPP;
+       }
+
+       if (htt->target_version_minor != HTT_CURRENT_VERSION_MINOR)
+               ath10k_warn("htt minor versions differ but are still compatible\n");
+
+       return 0;
+}
+
+int ath10k_htt_attach_target(struct ath10k_htt *htt)
+{
+       int status;
+
+       init_completion(&htt->target_version_received);
+
+       status = ath10k_htt_h2t_ver_req_msg(htt);
+       if (status)
+               return status;
+
+       status = wait_for_completion_timeout(&htt->target_version_received,
+                                               HTT_TARGET_VERSION_TIMEOUT_HZ);
+       if (status <= 0) {
+               ath10k_warn("htt version request timed out\n");
+               return -ETIMEDOUT;
+       }
+
+       status = ath10k_htt_verify_version(htt);
+       if (status)
+               return status;
+
+       return ath10k_htt_send_rx_ring_cfg_ll(htt);
+}
+
+void ath10k_htt_detach(struct ath10k_htt *htt)
+{
+       ath10k_htt_rx_detach(htt);
+       ath10k_htt_tx_detach(htt);
+       kfree(htt);
+}
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
new file mode 100644 (file)
index 0000000..a7a7aa0
--- /dev/null
@@ -0,0 +1,1338 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _HTT_H_
+#define _HTT_H_
+
+#include <linux/bug.h>
+
+#include "core.h"
+#include "htc.h"
+#include "rx_desc.h"
+
+#define HTT_CURRENT_VERSION_MAJOR      2
+#define HTT_CURRENT_VERSION_MINOR      1
+
+enum htt_dbg_stats_type {
+       HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
+       HTT_DBG_STATS_RX_REORDER    = 1 << 1,
+       HTT_DBG_STATS_RX_RATE_INFO  = 1 << 2,
+       HTT_DBG_STATS_TX_PPDU_LOG   = 1 << 3,
+       HTT_DBG_STATS_TX_RATE_INFO  = 1 << 4,
+       /* bits 5-23 currently reserved */
+
+       HTT_DBG_NUM_STATS /* keep this last */
+};
+
+enum htt_h2t_msg_type { /* host-to-target */
+       HTT_H2T_MSG_TYPE_VERSION_REQ        = 0,
+       HTT_H2T_MSG_TYPE_TX_FRM             = 1,
+       HTT_H2T_MSG_TYPE_RX_RING_CFG        = 2,
+       HTT_H2T_MSG_TYPE_STATS_REQ          = 3,
+       HTT_H2T_MSG_TYPE_SYNC               = 4,
+       HTT_H2T_MSG_TYPE_AGGR_CFG           = 5,
+       HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG = 6,
+       HTT_H2T_MSG_TYPE_MGMT_TX            = 7,
+
+       HTT_H2T_NUM_MSGS /* keep this last */
+};
+
+struct htt_cmd_hdr {
+       u8 msg_type;
+} __packed;
+
+struct htt_ver_req {
+       u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
+} __packed;
+
+/*
+ * HTT tx MSDU descriptor
+ *
+ * The HTT tx MSDU descriptor is created by the host HTT SW for each
+ * tx MSDU.  The HTT tx MSDU descriptor contains the information that
+ * the target firmware needs for the FW's tx processing, particularly
+ * for creating the HW msdu descriptor.
+ * The same HTT tx descriptor is used for HL and LL systems, though
+ * a few fields within the tx descriptor are used only by LL or
+ * only by HL.
+ * The HTT tx descriptor is defined in two manners: by a struct with
+ * bitfields, and by a series of [dword offset, bit mask, bit shift]
+ * definitions.
+ * The target should use the struct def, for simplicity and clarity,
+ * but the host shall use the bit-mask + bit-shift defs, to be endian-
+ * neutral.  Specifically, the host shall use the get/set macros built
+ * around the mask + shift defs.
+ */
+struct htt_data_tx_desc_frag {
+       __le32 paddr;
+       __le32 len;
+} __packed;
+
+enum htt_data_tx_desc_flags0 {
+       HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0,
+       HTT_DATA_TX_DESC_FLAGS0_NO_AGGR         = 1 << 1,
+       HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT      = 1 << 2,
+       HTT_DATA_TX_DESC_FLAGS0_NO_CLASSIFY     = 1 << 3,
+       HTT_DATA_TX_DESC_FLAGS0_RSVD0           = 1 << 4
+#define HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_MASK 0xE0
+#define HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_LSB 5
+};
+
+enum htt_data_tx_desc_flags1 {
+#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_BITS 6
+#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_MASK 0x003F
+#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_LSB  0
+#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_BITS 5
+#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_MASK 0x07C0
+#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_LSB  6
+       HTT_DATA_TX_DESC_FLAGS1_POSTPONED        = 1 << 11,
+       HTT_DATA_TX_DESC_FLAGS1_MORE_IN_BATCH    = 1 << 12,
+       HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD = 1 << 13,
+       HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD = 1 << 14,
+       HTT_DATA_TX_DESC_FLAGS1_RSVD1            = 1 << 15
+};
+
+enum htt_data_tx_ext_tid {
+       HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST = 16,
+       HTT_DATA_TX_EXT_TID_MGMT                = 17,
+       HTT_DATA_TX_EXT_TID_INVALID             = 31
+};
+
+#define HTT_INVALID_PEERID 0xFFFF
+
+/*
+ * htt_data_tx_desc - used for data tx path
+ *
+ * Note: vdev_id irrelevant for pkt_type == raw and no_classify == 1.
+ *       ext_tid: for qos-data frames (0-15), see %HTT_DATA_TX_EXT_TID_
+ *                for special kinds of tids
+ *       postponed: only for HL hosts. indicates if this is a resend
+ *                  (HL hosts manage queues on the host )
+ *       more_in_batch: only for HL hosts. indicates if more packets are
+ *                      pending. this allows target to wait and aggregate
+ */
+struct htt_data_tx_desc {
+       u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */
+       __le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */
+       __le16 len;
+       __le16 id;
+       __le32 frags_paddr;
+       __le32 peerid;
+       u8 prefetch[0]; /* start of frame, for FW classification engine */
+} __packed;
+
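+/*
+ * Illustrative sketch of the mask + shift convention referred to above for
+ * host-side access: fields are packed into flags1 with the *_MASK/*_LSB
+ * defines so the layout stays endian-neutral. EXAMPLE_SM() and the helper
+ * are assumptions added only for illustration.
+ */
+#define EXAMPLE_SM(val, field) (((val) << field##_LSB) & field##_MASK)
+
+static inline void example_htt_tx_desc_fill_flags1(
+                       struct htt_data_tx_desc *desc, u16 vdev_id, u16 tid)
+{
+       u16 flags1 = 0;
+
+       flags1 |= EXAMPLE_SM(vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
+       flags1 |= EXAMPLE_SM(tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
+
+       desc->flags1 = __cpu_to_le16(flags1);
+}
+
+#undef EXAMPLE_SM
+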
+enum htt_rx_ring_flags {
+       HTT_RX_RING_FLAGS_MAC80211_HDR = 1 << 0,
+       HTT_RX_RING_FLAGS_MSDU_PAYLOAD = 1 << 1,
+       HTT_RX_RING_FLAGS_PPDU_START   = 1 << 2,
+       HTT_RX_RING_FLAGS_PPDU_END     = 1 << 3,
+       HTT_RX_RING_FLAGS_MPDU_START   = 1 << 4,
+       HTT_RX_RING_FLAGS_MPDU_END     = 1 << 5,
+       HTT_RX_RING_FLAGS_MSDU_START   = 1 << 6,
+       HTT_RX_RING_FLAGS_MSDU_END     = 1 << 7,
+       HTT_RX_RING_FLAGS_RX_ATTENTION = 1 << 8,
+       HTT_RX_RING_FLAGS_FRAG_INFO    = 1 << 9,
+       HTT_RX_RING_FLAGS_UNICAST_RX   = 1 << 10,
+       HTT_RX_RING_FLAGS_MULTICAST_RX = 1 << 11,
+       HTT_RX_RING_FLAGS_CTRL_RX      = 1 << 12,
+       HTT_RX_RING_FLAGS_MGMT_RX      = 1 << 13,
+       HTT_RX_RING_FLAGS_NULL_RX      = 1 << 14,
+       HTT_RX_RING_FLAGS_PHY_DATA_RX  = 1 << 15
+};
+
+struct htt_rx_ring_setup_ring {
+       __le32 fw_idx_shadow_reg_paddr;
+       __le32 rx_ring_base_paddr;
+       __le16 rx_ring_len; /* in 4-byte words */
+       __le16 rx_ring_bufsize; /* rx skb size - in bytes */
+       __le16 flags; /* %HTT_RX_RING_FLAGS_ */
+       __le16 fw_idx_init_val;
+
+       /* the following offsets are in 4-byte units */
+       __le16 mac80211_hdr_offset;
+       __le16 msdu_payload_offset;
+       __le16 ppdu_start_offset;
+       __le16 ppdu_end_offset;
+       __le16 mpdu_start_offset;
+       __le16 mpdu_end_offset;
+       __le16 msdu_start_offset;
+       __le16 msdu_end_offset;
+       __le16 rx_attention_offset;
+       __le16 frag_info_offset;
+} __packed;
+
+struct htt_rx_ring_setup_hdr {
+       u8 num_rings; /* supported values: 1, 2 */
+       __le16 rsvd0;
+} __packed;
+
+struct htt_rx_ring_setup {
+       struct htt_rx_ring_setup_hdr hdr;
+       struct htt_rx_ring_setup_ring rings[0];
+} __packed;
+
+/*
+ * htt_stats_req - request target to send specified statistics
+ *
+ * @msg_type: hardcoded %HTT_H2T_MSG_TYPE_STATS_REQ
+ * @upload_types: see %htt_dbg_stats_type. this is actually a 24-bit field,
+ *     so make sure it's little-endian.
+ * @reset_types: see %htt_dbg_stats_type. this is actually a 24-bit field,
+ *     so make sure it's little-endian.
+ * @cfg_val: stat_type specific configuration
+ * @stat_type: see %htt_dbg_stats_type
+ * @cookie_lsb: used for confirmation message from target->host
+ * @cookie_msb: ditto as %cookie_lsb
+ */
+struct htt_stats_req {
+       u8 upload_types[3];
+       u8 rsvd0;
+       u8 reset_types[3];
+       struct {
+               u8 mpdu_bytes;
+               u8 mpdu_num_msdus;
+               u8 msdu_bytes;
+       } __packed;
+       u8 stat_type;
+       __le32 cookie_lsb;
+       __le32 cookie_msb;
+} __packed;
+
+#define HTT_STATS_REQ_CFG_STAT_TYPE_INVALID 0xff
+
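+/*
+ * Illustrative sketch of filling the 24-bit little-endian upload_types
+ * bitmask described above, one byte at a time starting with the least
+ * significant byte. The helper is an assumption added only for illustration.
+ */
+static inline void example_htt_stats_req_set_upload(struct htt_stats_req *req,
+                                                    u32 mask)
+{
+       req->upload_types[0] = mask & 0xff;
+       req->upload_types[1] = (mask >> 8) & 0xff;
+       req->upload_types[2] = (mask >> 16) & 0xff;
+}
+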
+/*
+ * htt_oob_sync_req - request out-of-band sync
+ *
+ * The HTT SYNC tells the target to suspend processing of subsequent
+ * HTT host-to-target messages until some other target agent locally
+ * informs the target HTT FW that the current sync counter is equal to
+ * or greater than (in a modulo sense) the sync counter specified in
+ * the SYNC message.
+ *
+ * This allows other host-target components to synchronize their operation
+ * with HTT, e.g. to ensure that tx frames don't get transmitted until a
+ * security key has been downloaded to and activated by the target.
+ * In the absence of any explicit synchronization counter value
+ * specification, the target HTT FW will use zero as the default current
+ * sync value.
+ *
+ * The HTT target FW will suspend its host->target message processing as long
+ * as 0 < (in-band sync counter - out-of-band sync counter) & 0xff < 128.
+ */
+struct htt_oob_sync_req {
+       u8 sync_count;
+       __le16 rsvd0;
+} __packed;
+
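+/*
+ * Illustrative sketch of the suspend condition described above: with 8-bit
+ * wrap-around arithmetic the target keeps host->target processing suspended
+ * while the in-band counter is ahead of the out-of-band counter by 1..127.
+ * The helper is an assumption added only for illustration.
+ */
+static inline bool example_htt_oob_sync_suspended(u8 in_band, u8 out_of_band)
+{
+       u8 delta = (in_band - out_of_band) & 0xff;
+
+       return delta > 0 && delta < 128;
+}
+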
+#define HTT_AGGR_CONF_MAX_NUM_AMSDU_SUBFRAMES_MASK 0x1F
+#define HTT_AGGR_CONF_MAX_NUM_AMSDU_SUBFRAMES_LSB  0
+
+struct htt_aggr_conf {
+       u8 max_num_ampdu_subframes;
+       union {
+               /* don't use bitfields; undefined behaviour */
+               u8 flags; /* see %HTT_AGGR_CONF_MAX_NUM_AMSDU_SUBFRAMES_ */
+               u8 max_num_amsdu_subframes:5;
+       } __packed;
+} __packed;
+
+#define HTT_MGMT_FRM_HDR_DOWNLOAD_LEN 32
+
+struct htt_mgmt_tx_desc {
+       u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
+       __le32 msdu_paddr;
+       __le32 desc_id;
+       __le32 len;
+       __le32 vdev_id;
+       u8 hdr[HTT_MGMT_FRM_HDR_DOWNLOAD_LEN];
+} __packed;
+
+enum htt_mgmt_tx_status {
+       HTT_MGMT_TX_STATUS_OK    = 0,
+       HTT_MGMT_TX_STATUS_RETRY = 1,
+       HTT_MGMT_TX_STATUS_DROP  = 2
+};
+
+/*=== target -> host messages ===============================================*/
+
+
+enum htt_t2h_msg_type {
+       HTT_T2H_MSG_TYPE_VERSION_CONF           = 0x0,
+       HTT_T2H_MSG_TYPE_RX_IND                 = 0x1,
+       HTT_T2H_MSG_TYPE_RX_FLUSH               = 0x2,
+       HTT_T2H_MSG_TYPE_PEER_MAP               = 0x3,
+       HTT_T2H_MSG_TYPE_PEER_UNMAP             = 0x4,
+       HTT_T2H_MSG_TYPE_RX_ADDBA               = 0x5,
+       HTT_T2H_MSG_TYPE_RX_DELBA               = 0x6,
+       HTT_T2H_MSG_TYPE_TX_COMPL_IND           = 0x7,
+       HTT_T2H_MSG_TYPE_PKTLOG                 = 0x8,
+       HTT_T2H_MSG_TYPE_STATS_CONF             = 0x9,
+       HTT_T2H_MSG_TYPE_RX_FRAG_IND            = 0xa,
+       HTT_T2H_MSG_TYPE_SEC_IND                = 0xb,
+       HTT_T2H_MSG_TYPE_RC_UPDATE_IND          = 0xc,
+       HTT_T2H_MSG_TYPE_TX_INSPECT_IND         = 0xd,
+       HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION     = 0xe,
+       HTT_T2H_MSG_TYPE_TEST,
+       /* keep this last */
+       HTT_T2H_NUM_MSGS
+};
+
+/*
+ * htt_resp_hdr - header for target-to-host messages
+ *
+ * msg_type: see htt_t2h_msg_type
+ */
+struct htt_resp_hdr {
+       u8 msg_type;
+} __packed;
+
+#define HTT_RESP_HDR_MSG_TYPE_OFFSET 0
+#define HTT_RESP_HDR_MSG_TYPE_MASK   0xff
+#define HTT_RESP_HDR_MSG_TYPE_LSB    0
+
+/* htt_ver_resp - response sent for htt_ver_req */
+struct htt_ver_resp {
+       u8 minor;
+       u8 major;
+       u8 rsvd0;
+} __packed;
+
+struct htt_mgmt_tx_completion {
+       u8 rsvd0;
+       u8 rsvd1;
+       u8 rsvd2;
+       __le32 desc_id;
+       __le32 status;
+} __packed;
+
+#define HTT_RX_INDICATION_INFO0_EXT_TID_MASK  (0x3F)
+#define HTT_RX_INDICATION_INFO0_EXT_TID_LSB   (0)
+#define HTT_RX_INDICATION_INFO0_FLUSH_VALID   (1 << 6)
+#define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 7)
+
+#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_MASK   0x0000003F
+#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_LSB    0
+#define HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_MASK     0x00000FC0
+#define HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_LSB      6
+#define HTT_RX_INDICATION_INFO1_RELEASE_START_SEQNO_MASK 0x0003F000
+#define HTT_RX_INDICATION_INFO1_RELEASE_START_SEQNO_LSB  12
+#define HTT_RX_INDICATION_INFO1_RELEASE_END_SEQNO_MASK   0x00FC0000
+#define HTT_RX_INDICATION_INFO1_RELEASE_END_SEQNO_LSB    18
+#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_MASK     0xFF000000
+#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_LSB      24
+
+struct htt_rx_indication_hdr {
+       u8 info0; /* %HTT_RX_INDICATION_INFO0_ */
+       __le16 peer_id;
+       __le32 info1; /* %HTT_RX_INDICATION_INFO1_ */
+} __packed;
+
+#define HTT_RX_INDICATION_INFO0_PHY_ERR_VALID    (1 << 0)
+#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_MASK (0x1E)
+#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_LSB  (1)
+#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK  (1 << 5)
+#define HTT_RX_INDICATION_INFO0_END_VALID        (1 << 6)
+#define HTT_RX_INDICATION_INFO0_START_VALID      (1 << 7)
+
+#define HTT_RX_INDICATION_INFO1_VHT_SIG_A1_MASK    0x00FFFFFF
+#define HTT_RX_INDICATION_INFO1_VHT_SIG_A1_LSB     0
+#define HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_MASK 0xFF000000
+#define HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_LSB  24
+
+#define HTT_RX_INDICATION_INFO2_VHT_SIG_A1_MASK 0x00FFFFFF
+#define HTT_RX_INDICATION_INFO2_VHT_SIG_A1_LSB  0
+#define HTT_RX_INDICATION_INFO2_SERVICE_MASK    0xFF000000
+#define HTT_RX_INDICATION_INFO2_SERVICE_LSB     24
+
+enum htt_rx_legacy_rate {
+       HTT_RX_OFDM_48 = 0,
+       HTT_RX_OFDM_24 = 1,
+       HTT_RX_OFDM_12,
+       HTT_RX_OFDM_6,
+       HTT_RX_OFDM_54,
+       HTT_RX_OFDM_36,
+       HTT_RX_OFDM_18,
+       HTT_RX_OFDM_9,
+
+       /* long preamble */
+       HTT_RX_CCK_11_LP = 0,
+       HTT_RX_CCK_5_5_LP = 1,
+       HTT_RX_CCK_2_LP,
+       HTT_RX_CCK_1_LP,
+       /* short preamble */
+       HTT_RX_CCK_11_SP,
+       HTT_RX_CCK_5_5_SP,
+       HTT_RX_CCK_2_SP
+};
+
+enum htt_rx_legacy_rate_type {
+       HTT_RX_LEGACY_RATE_OFDM = 0,
+       HTT_RX_LEGACY_RATE_CCK
+};
+
+enum htt_rx_preamble_type {
+       HTT_RX_LEGACY        = 0x4,
+       HTT_RX_HT            = 0x8,
+       HTT_RX_HT_WITH_TXBF  = 0x9,
+       HTT_RX_VHT           = 0xC,
+       HTT_RX_VHT_WITH_TXBF = 0xD,
+};
+
+/*
+ * Fields: phy_err_valid, phy_err_code, tsf,
+ * usec_timestamp, sub_usec_timestamp
+ * are valid only if end_valid == 1.
+ *
+ * Fields: rssi_chains, legacy_rate_type,
+ * legacy_rate_cck, preamble_type, service,
+ * vht_sig_*
+ * are valid only if start_valid == 1.
+ */
+struct htt_rx_indication_ppdu {
+       u8 combined_rssi;
+       u8 sub_usec_timestamp;
+       u8 phy_err_code;
+       u8 info0; /* HTT_RX_INDICATION_INFO0_ */
+       struct {
+               u8 pri20_db;
+               u8 ext20_db;
+               u8 ext40_db;
+               u8 ext80_db;
+       } __packed rssi_chains[4];
+       __le32 tsf;
+       __le32 usec_timestamp;
+       __le32 info1; /* HTT_RX_INDICATION_INFO1_ */
+       __le32 info2; /* HTT_RX_INDICATION_INFO2_ */
+} __packed;
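For illustration, this is roughly how the legacy rate field in the PPDU descriptor could be decoded using the INFO0 masks defined above; the macros and enums are from this header, while the function itself is only a sketch and not part of the patch:

static enum htt_rx_legacy_rate_type
example_legacy_rate(const struct htt_rx_indication_ppdu *ppdu, int *rate_idx)
{
	u8 info0 = ppdu->info0;

	/* 4-bit rate index: bits 4:1 of info0 */
	*rate_idx = (info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_MASK) >>
		    HTT_RX_INDICATION_INFO0_LEGACY_RATE_LSB;

	/* the same index means OFDM or CCK depending on this flag */
	if (info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK)
		return HTT_RX_LEGACY_RATE_CCK;

	return HTT_RX_LEGACY_RATE_OFDM;
}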
+
+enum htt_rx_mpdu_status {
+       HTT_RX_IND_MPDU_STATUS_UNKNOWN = 0x0,
+       HTT_RX_IND_MPDU_STATUS_OK,
+       HTT_RX_IND_MPDU_STATUS_ERR_FCS,
+       HTT_RX_IND_MPDU_STATUS_ERR_DUP,
+       HTT_RX_IND_MPDU_STATUS_ERR_REPLAY,
+       HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER,
+       /* only accept EAPOL frames */
+       HTT_RX_IND_MPDU_STATUS_UNAUTH_PEER,
+       HTT_RX_IND_MPDU_STATUS_OUT_OF_SYNC,
+       /* Non-data in promiscuous mode */
+       HTT_RX_IND_MPDU_STATUS_MGMT_CTRL,
+       HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR,
+       HTT_RX_IND_MPDU_STATUS_DECRYPT_ERR,
+       HTT_RX_IND_MPDU_STATUS_MPDU_LENGTH_ERR,
+       HTT_RX_IND_MPDU_STATUS_ENCRYPT_REQUIRED_ERR,
+       HTT_RX_IND_MPDU_STATUS_PRIVACY_ERR,
+
+       /*
+        * MISC: discard for unspecified reasons.
+        * Leave this enum value last.
+        */
+       HTT_RX_IND_MPDU_STATUS_ERR_MISC = 0xFF
+};
+
+struct htt_rx_indication_mpdu_range {
+       u8 mpdu_count;
+       u8 mpdu_range_status; /* %htt_rx_mpdu_status */
+       u8 pad0;
+       u8 pad1;
+} __packed;
+
+struct htt_rx_indication_prefix {
+       __le16 fw_rx_desc_bytes;
+       u8 pad0;
+       u8 pad1;
+};
+
+struct htt_rx_indication {
+       struct htt_rx_indication_hdr hdr;
+       struct htt_rx_indication_ppdu ppdu;
+       struct htt_rx_indication_prefix prefix;
+
+       /*
+        * the following fields are both dynamically sized, so
+        * take care when addressing them
+        */
+
+       /* the size of this is %fw_rx_desc_bytes */
+       struct fw_rx_desc_base fw_desc;
+
+       /*
+        * %mpdu_ranges starts after &%prefix + roundup(%fw_rx_desc_bytes, 4)
+        * and has %num_mpdu_ranges elements.
+        */
+       struct htt_rx_indication_mpdu_range mpdu_ranges[0];
+} __packed;
+
+static inline struct htt_rx_indication_mpdu_range *
+               htt_rx_ind_get_mpdu_ranges(struct htt_rx_indication *rx_ind)
+{
+       void *ptr = rx_ind;
+
+       ptr += sizeof(rx_ind->hdr)
+            + sizeof(rx_ind->ppdu)
+            + sizeof(rx_ind->prefix)
+            + roundup(__le16_to_cpu(rx_ind->prefix.fw_rx_desc_bytes), 4);
+       return ptr;
+}
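As a hedged usage sketch of the helper above, a receive handler could walk the MPDU ranges like this; the function name is illustrative, and num_mpdu_ranges is taken from hdr.info1 with the masks defined earlier:

static void example_walk_mpdu_ranges(struct htt_rx_indication *rx_ind)
{
	struct htt_rx_indication_mpdu_range *ranges;
	int i, num_ranges;

	num_ranges = (__le32_to_cpu(rx_ind->hdr.info1) &
		      HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_MASK) >>
		     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_LSB;

	ranges = htt_rx_ind_get_mpdu_ranges(rx_ind);
	for (i = 0; i < num_ranges; i++) {
		/* each range is a run of MPDUs sharing one status value */
		pr_debug("range %d: %d mpdus, status %d\n",
			 i, ranges[i].mpdu_count, ranges[i].mpdu_range_status);
	}
}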
+
+enum htt_rx_flush_mpdu_status {
+       HTT_RX_FLUSH_MPDU_DISCARD = 0,
+       HTT_RX_FLUSH_MPDU_REORDER = 1,
+};
+
+/*
+ * htt_rx_flush - discard or reorder given range of mpdus
+ *
+ * Note: host must check if all sequence numbers between
+ *     [seq_num_start, seq_num_end-1] are valid.
+ */
+struct htt_rx_flush {
+       __le16 peer_id;
+       u8 tid;
+       u8 rsvd0;
+       u8 mpdu_status; /* %htt_rx_flush_mpdu_status */
+       u8 seq_num_start; /* lowest 6 bits of the 802.11 seq number */
+       u8 seq_num_end; /* lowest 6 bits of the 802.11 seq number */
+};
+
+struct htt_rx_peer_map {
+       u8 vdev_id;
+       __le16 peer_id;
+       u8 addr[6];
+       u8 rsvd0;
+       u8 rsvd1;
+} __packed;
+
+struct htt_rx_peer_unmap {
+       u8 rsvd0;
+       __le16 peer_id;
+} __packed;
+
+enum htt_security_types {
+       HTT_SECURITY_NONE,
+       HTT_SECURITY_WEP128,
+       HTT_SECURITY_WEP104,
+       HTT_SECURITY_WEP40,
+       HTT_SECURITY_TKIP,
+       HTT_SECURITY_TKIP_NOMIC,
+       HTT_SECURITY_AES_CCMP,
+       HTT_SECURITY_WAPI,
+
+       HTT_NUM_SECURITY_TYPES /* keep this last! */
+};
+
+enum htt_security_flags {
+#define HTT_SECURITY_TYPE_MASK 0x7F
+#define HTT_SECURITY_TYPE_LSB  0
+       HTT_SECURITY_IS_UNICAST = 1 << 7
+};
+
+struct htt_security_indication {
+       union {
+               /* don't use bitfields; undefined behaviour */
+               u8 flags; /* %htt_security_flags */
+               struct {
+                       u8 security_type:7, /* %htt_security_types */
+                          is_unicast:1;
+               } __packed;
+       } __packed;
+       __le16 peer_id;
+       u8 michael_key[8];
+       u8 wapi_rsc[16];
+} __packed;
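As the comment in the union suggests, the flags byte is meant to be decoded with the mask/shift pair rather than the bitfield view; a minimal, purely illustrative sketch:

static void example_parse_sec_ind(const struct htt_security_indication *ev)
{
	enum htt_security_types sec;
	bool unicast;

	sec = (ev->flags & HTT_SECURITY_TYPE_MASK) >> HTT_SECURITY_TYPE_LSB;
	unicast = !!(ev->flags & HTT_SECURITY_IS_UNICAST);

	pr_debug("security ind: peer %d type %d %scast\n",
		 __le16_to_cpu(ev->peer_id), sec, unicast ? "uni" : "multi");
}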
+
+#define HTT_RX_BA_INFO0_TID_MASK     0x000F
+#define HTT_RX_BA_INFO0_TID_LSB      0
+#define HTT_RX_BA_INFO0_PEER_ID_MASK 0xFFF0
+#define HTT_RX_BA_INFO0_PEER_ID_LSB  4
+
+struct htt_rx_addba {
+       u8 window_size;
+       __le16 info0; /* %HTT_RX_BA_INFO0_ */
+} __packed;
+
+struct htt_rx_delba {
+       u8 rsvd0;
+       __le16 info0; /* %HTT_RX_BA_INFO0_ */
+} __packed;
+
+enum htt_data_tx_status {
+       HTT_DATA_TX_STATUS_OK            = 0,
+       HTT_DATA_TX_STATUS_DISCARD       = 1,
+       HTT_DATA_TX_STATUS_NO_ACK        = 2,
+       HTT_DATA_TX_STATUS_POSTPONE      = 3, /* HL only */
+       HTT_DATA_TX_STATUS_DOWNLOAD_FAIL = 128
+};
+
+enum htt_data_tx_flags {
+#define HTT_DATA_TX_STATUS_MASK 0x07
+#define HTT_DATA_TX_STATUS_LSB  0
+#define HTT_DATA_TX_TID_MASK    0x78
+#define HTT_DATA_TX_TID_LSB     3
+       HTT_DATA_TX_TID_INVALID = 1 << 7
+};
+
+#define HTT_TX_COMPL_INV_MSDU_ID 0xFFFF
+
+struct htt_data_tx_completion {
+       union {
+               u8 flags;
+               struct {
+                       u8 status:3,
+                          tid:4,
+                          tid_invalid:1;
+               } __packed;
+       } __packed;
+       u8 num_msdus;
+       u8 rsvd0;
+       __le16 msdus[0]; /* variable length based on %num_msdus */
+} __packed;
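A hedged sketch of consuming a data tx completion with the flag masks above rather than the bitfield view; names other than those defined in this header are illustrative:

static void example_parse_data_tx_compl(const struct htt_data_tx_completion *ev)
{
	int status, i;

	status = (ev->flags & HTT_DATA_TX_STATUS_MASK) >> HTT_DATA_TX_STATUS_LSB;

	for (i = 0; i < ev->num_msdus; i++) {
		u16 msdu_id = __le16_to_cpu(ev->msdus[i]);

		if (msdu_id == HTT_TX_COMPL_INV_MSDU_ID)
			continue; /* skip invalid/padding entries */

		pr_debug("msdu %d completed, status %d\n", msdu_id, status);
	}
}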
+
+struct htt_tx_compl_ind_base {
+       u32 hdr;
+       u16 payload[1/*or more*/];
+} __packed;
+
+struct htt_rc_tx_done_params {
+       u32 rate_code;
+       u32 rate_code_flags;
+       u32 flags;
+       u32 num_enqued; /* 1 for non-AMPDU */
+       u32 num_retries;
+       u32 num_failed; /* for AMPDU */
+       u32 ack_rssi;
+       u32 time_stamp;
+       u32 is_probe;
+};
+
+struct htt_rc_update {
+       u8 vdev_id;
+       __le16 peer_id;
+       u8 addr[6];
+       u8 num_elems;
+       u8 rsvd0;
+       struct htt_rc_tx_done_params params[0]; /* variable length %num_elems */
+} __packed;
+
+/* see htt_rx_indication for similar fields and descriptions */
+struct htt_rx_fragment_indication {
+       union {
+               u8 info0; /* %HTT_RX_FRAG_IND_INFO0_ */
+               struct {
+                       u8 ext_tid:5,
+                          flush_valid:1;
+               } __packed;
+       } __packed;
+       __le16 peer_id;
+       __le32 info1; /* %HTT_RX_FRAG_IND_INFO1_ */
+       __le16 fw_rx_desc_bytes;
+       __le16 rsvd0;
+
+       u8 fw_msdu_rx_desc[0];
+} __packed;
+
+#define HTT_RX_FRAG_IND_INFO0_EXT_TID_MASK     0x1F
+#define HTT_RX_FRAG_IND_INFO0_EXT_TID_LSB      0
+#define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_MASK 0x20
+#define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_LSB  5
+
+#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_START_MASK 0x0000003F
+#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_START_LSB  0
+#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_MASK   0x00000FC0
+#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_LSB    6
+
+/*
+ * target -> host test message definition
+ *
+ * The following field definitions describe the format of the test
+ * message sent from the target to the host.
+ * The message consists of a 4-octet header, followed by a variable
+ * number of 32-bit integer values, followed by a variable number
+ * of 8-bit character values.
+ *
+ * |31                         16|15           8|7            0|
+ * |-----------------------------------------------------------|
+ * |          num chars          |   num ints   |   msg type   |
+ * |-----------------------------------------------------------|
+ * |                           int 0                           |
+ * |-----------------------------------------------------------|
+ * |                           int 1                           |
+ * |-----------------------------------------------------------|
+ * |                            ...                            |
+ * |-----------------------------------------------------------|
+ * |    char 3    |    char 2    |    char 1    |    char 0    |
+ * |-----------------------------------------------------------|
+ * |              |              |      ...     |    char 4    |
+ * |-----------------------------------------------------------|
+ *   - MSG_TYPE
+ *     Bits 7:0
+ *     Purpose: identifies this as a test message
+ *     Value: HTT_MSG_TYPE_TEST
+ *   - NUM_INTS
+ *     Bits 15:8
+ *     Purpose: indicate how many 32-bit integers follow the message header
+ *   - NUM_CHARS
+ *     Bits 31:16
+ *     Purpose: indicate how many 8-bit characters follow the series of integers
+ */
+struct htt_rx_test {
+       u8 num_ints;
+       __le16 num_chars;
+
+       /* payload consists of 2 lists:
+        *  a) num_ints * sizeof(__le32)
+        *  b) num_chars * sizeof(u8) aligned to 4 bytes */
+       u8 payload[0];
+} __packed;
+
+static inline __le32 *htt_rx_test_get_ints(struct htt_rx_test *rx_test)
+{
+       return (__le32 *)rx_test->payload;
+}
+
+static inline u8 *htt_rx_test_get_chars(struct htt_rx_test *rx_test)
+{
+       return rx_test->payload + (rx_test->num_ints * sizeof(__le32));
+}
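A purely illustrative usage sketch for the two accessors above (not part of the patch):

static void example_dump_rx_test(struct htt_rx_test *rx_test)
{
	__le32 *ints = htt_rx_test_get_ints(rx_test);
	u8 *chars = htt_rx_test_get_chars(rx_test);
	int i;

	for (i = 0; i < rx_test->num_ints; i++)
		pr_debug("test int %d: %u\n", i, __le32_to_cpu(ints[i]));

	for (i = 0; i < __le16_to_cpu(rx_test->num_chars); i++)
		pr_debug("test char %d: 0x%02x\n", i, chars[i]);
}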
+
+/*
+ * target -> host packet log message
+ *
+ * The following field definitions describe the format of the packet log
+ * message sent from the target to the host.
+ * The message consists of a 4-octet header, followed by a variable
+ * number of 32-bit payload words.
+ *
+ * |31          24|23          16|15           8|7            0|
+ * |-----------------------------------------------------------|
+ * |              |              |              |   msg type   |
+ * |-----------------------------------------------------------|
+ * |                        payload                            |
+ * |-----------------------------------------------------------|
+ *   - MSG_TYPE
+ *     Bits 7:0
+ *     Purpose: identifies this as a packet log message
+ *     Value: HTT_MSG_TYPE_PACKETLOG
+ */
+struct htt_pktlog_msg {
+       u8 pad[3];
+       __le32 payload[1 /* or more */];
+} __packed;
+
+struct htt_dbg_stats_rx_reorder_stats {
+       /* Non QoS MPDUs received */
+       __le32 deliver_non_qos;
+
+       /* MPDUs received in-order */
+       __le32 deliver_in_order;
+
+       /* Flush due to reorder timer expired */
+       __le32 deliver_flush_timeout;
+
+       /* Flush due to move out of window */
+       __le32 deliver_flush_oow;
+
+       /* Flush due to DELBA */
+       __le32 deliver_flush_delba;
+
+       /* MPDUs dropped due to FCS error */
+       __le32 fcs_error;
+
+       /* MPDUs dropped due to monitor mode non-data packet */
+       __le32 mgmt_ctrl;
+
+       /* MPDUs dropped due to invalid peer */
+       __le32 invalid_peer;
+
+       /* MPDUs dropped due to duplication (non aggregation) */
+       __le32 dup_non_aggr;
+
+       /* MPDUs dropped because they were already processed */
+       __le32 dup_past;
+
+       /* MPDUs dropped due to duplicate in reorder queue */
+       __le32 dup_in_reorder;
+
+       /* Reorder timeout happened */
+       __le32 reorder_timeout;
+
+       /* invalid bar ssn */
+       __le32 invalid_bar_ssn;
+
+       /* reorder reset due to bar ssn */
+       __le32 ssn_reset;
+};
+
+struct htt_dbg_stats_wal_tx_stats {
+       /* Num HTT cookies queued to dispatch list */
+       __le32 comp_queued;
+
+       /* Num HTT cookies dispatched */
+       __le32 comp_delivered;
+
+       /* Num MSDUs queued to WAL */
+       __le32 msdu_enqued;
+
+       /* Num MPDUs queued to WAL */
+       __le32 mpdu_enqued;
+
+       /* Num MSDUs dropped by WMM limit */
+       __le32 wmm_drop;
+
+       /* Num Local frames queued */
+       __le32 local_enqued;
+
+       /* Num Local frames done */
+       __le32 local_freed;
+
+       /* Num queued to HW */
+       __le32 hw_queued;
+
+       /* Num PPDU reaped from HW */
+       __le32 hw_reaped;
+
+       /* Num underruns */
+       __le32 underrun;
+
+       /* Num PPDUs cleaned up in TX abort */
+       __le32 tx_abort;
+
+       /* Num MPDUs requeued by SW */
+       __le32 mpdus_requed;
+
+       /* excessive retries */
+       __le32 tx_ko;
+
+       /* data hw rate code */
+       __le32 data_rc;
+
+       /* Scheduler self triggers */
+       __le32 self_triggers;
+
+       /* frames dropped due to excessive sw retries */
+       __le32 sw_retry_failure;
+
+       /* illegal rate phy errors  */
+       __le32 illgl_rate_phy_err;
+
+       /* wal pdev continuous xretry */
+       __le32 pdev_cont_xretry;
+
+       /* wal pdev tx timeouts */
+       __le32 pdev_tx_timeout;
+
+       /* wal pdev resets  */
+       __le32 pdev_resets;
+
+       __le32 phy_underrun;
+
+       /* MPDUs exceeding the txop limit */
+       __le32 txop_ovf;
+} __packed;
+
+struct htt_dbg_stats_wal_rx_stats {
+       /* Counts any change in ring routing mid-ppdu */
+       __le32 mid_ppdu_route_change;
+
+       /* Total number of statuses processed */
+       __le32 status_rcvd;
+
+       /* Extra frags on rings 0-3 */
+       __le32 r0_frags;
+       __le32 r1_frags;
+       __le32 r2_frags;
+       __le32 r3_frags;
+
+       /* MSDUs / MPDUs delivered to HTT */
+       __le32 htt_msdus;
+       __le32 htt_mpdus;
+
+       /* MSDUs / MPDUs delivered to local stack */
+       __le32 loc_msdus;
+       __le32 loc_mpdus;
+
+       /* AMSDUs that have more MSDUs than the status ring size */
+       __le32 oversize_amsdu;
+
+       /* Number of PHY errors */
+       __le32 phy_errs;
+
+       /* Number of PHY errors drops */
+       __le32 phy_err_drop;
+
+       /* Number of mpdu errors - FCS, MIC, ENC etc. */
+       __le32 mpdu_errs;
+} __packed;
+
+struct htt_dbg_stats_wal_peer_stats {
+       __le32 dummy; /* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */
+} __packed;
+
+struct htt_dbg_stats_wal_pdev_txrx {
+       struct htt_dbg_stats_wal_tx_stats tx_stats;
+       struct htt_dbg_stats_wal_rx_stats rx_stats;
+       struct htt_dbg_stats_wal_peer_stats peer_stats;
+} __packed;
+
+struct htt_dbg_stats_rx_rate_info {
+       __le32 mcs[10];
+       __le32 sgi[10];
+       __le32 nss[4];
+       __le32 stbc[10];
+       __le32 bw[3];
+       __le32 pream[6];
+       __le32 ldpc;
+       __le32 txbf;
+};
+
+/*
+ * htt_dbg_stats_status -
+ * present -     The requested stats have been delivered in full.
+ *               This indicates that either the stats information was contained
+ *               in its entirety within this message, or else this message
+ *               completes the delivery of the requested stats info that was
+ *               partially delivered through earlier STATS_CONF messages.
+ * partial -     The requested stats have been delivered in part.
+ *               One or more subsequent STATS_CONF messages with the same
+ *               cookie value will be sent to deliver the remainder of the
+ *               information.
+ * error -       The requested stats could not be delivered, for example due
+ *               to a shortage of memory to construct a message holding the
+ *               requested stats.
+ * invalid -     The requested stat type is either not recognized, or the
+ *               target is configured to not gather the stats type in question.
+ * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+ * series_done - This special value indicates that no further stats info
+ *               elements are present within a series of stats info elems
+ *               (within a stats upload confirmation message).
+ */
+enum htt_dbg_stats_status {
+       HTT_DBG_STATS_STATUS_PRESENT     = 0,
+       HTT_DBG_STATS_STATUS_PARTIAL     = 1,
+       HTT_DBG_STATS_STATUS_ERROR       = 2,
+       HTT_DBG_STATS_STATUS_INVALID     = 3,
+       HTT_DBG_STATS_STATUS_SERIES_DONE = 7
+};
+
+/*
+ * target -> host statistics upload
+ *
+ * The following field definitions describe the format of the HTT target
+ * to host stats upload confirmation message.
+ * The message contains a cookie echoed from the HTT host->target stats
+ * upload request, which identifies which request the confirmation is
+ * for, and a series of tag-length-value stats information elements.
+ * The tag-length header for each stats info element also includes a
+ * status field, to indicate whether the request for the stat type in
+ * question was fully met, partially met, unable to be met, or invalid
+ * (if the stat type in question is disabled in the target).
+ * A special value of all 1's in this status field is used to indicate
+ * the end of the series of stats info elements.
+ *
+ *
+ * |31                         16|15           8|7   5|4       0|
+ * |------------------------------------------------------------|
+ * |                  reserved                  |    msg type   |
+ * |------------------------------------------------------------|
+ * |                        cookie LSBs                         |
+ * |------------------------------------------------------------|
+ * |                        cookie MSBs                         |
+ * |------------------------------------------------------------|
+ * |      stats entry length     |   reserved   |  S  |stat type|
+ * |------------------------------------------------------------|
+ * |                                                            |
+ * |                  type-specific stats info                  |
+ * |                                                            |
+ * |------------------------------------------------------------|
+ * |      stats entry length     |   reserved   |  S  |stat type|
+ * |------------------------------------------------------------|
+ * |                                                            |
+ * |                  type-specific stats info                  |
+ * |                                                            |
+ * |------------------------------------------------------------|
+ * |              n/a            |   reserved   | 111 |   n/a   |
+ * |------------------------------------------------------------|
+ * Header fields:
+ *  - MSG_TYPE
+ *    Bits 7:0
+ *    Purpose: identifies this as a statistics upload confirmation message
+ *    Value: 0x9
+ *  - COOKIE_LSBS
+ *    Bits 31:0
+ *    Purpose: Provide a mechanism to match a target->host stats confirmation
+ *        message with its preceding host->target stats request message.
+ *    Value: LSBs of the opaque cookie specified by the host-side requestor
+ *  - COOKIE_MSBS
+ *    Bits 31:0
+ *    Purpose: Provide a mechanism to match a target->host stats confirmation
+ *        message with its preceding host->target stats request message.
+ *    Value: MSBs of the opaque cookie specified by the host-side requestor
+ *
+ * Stats Information Element tag-length header fields:
+ *  - STAT_TYPE
+ *    Bits 4:0
+ *    Purpose: identifies the type of statistics info held in the
+ *        following information element
+ *    Value: htt_dbg_stats_type
+ *  - STATUS
+ *    Bits 7:5
+ *    Purpose: indicate whether the requested stats are present
+ *    Value: htt_dbg_stats_status, including a special value (0x7) to mark
+ *        the completion of the stats entry series
+ *  - LENGTH
+ *    Bits 31:16
+ *    Purpose: indicate the stats information size
+ *    Value: This field specifies the number of bytes of stats information
+ *       that follows the element tag-length header.
+ *       It is expected but not required that this length is a multiple of
+ *       4 bytes.  Even if the length is not an integer multiple of 4, the
+ *       subsequent stats entry header will begin on a 4-byte aligned
+ *       boundary.
+ */
+
+#define HTT_STATS_CONF_ITEM_INFO_STAT_TYPE_MASK 0x1F
+#define HTT_STATS_CONF_ITEM_INFO_STAT_TYPE_LSB  0
+#define HTT_STATS_CONF_ITEM_INFO_STATUS_MASK    0xE0
+#define HTT_STATS_CONF_ITEM_INFO_STATUS_LSB     5
+
+struct htt_stats_conf_item {
+       union {
+               u8 info;
+               struct {
+                       u8 stat_type:5; /* %HTT_DBG_STATS_ */
+                       u8 status:3; /* %HTT_DBG_STATS_STATUS_ */
+               } __packed;
+       } __packed;
+       u8 pad;
+       __le16 length;
+       u8 payload[0]; /* roundup(length, 4) long */
+} __packed;
+
+struct htt_stats_conf {
+       u8 pad[3];
+       __le32 cookie_lsb;
+       __le32 cookie_msb;
+
+       /* each item has variable length! */
+       struct htt_stats_conf_item items[0];
+} __packed;
+
+static inline struct htt_stats_conf_item *htt_stats_conf_next_item(
+                                       const struct htt_stats_conf_item *item)
+{
+       return (void *)item + sizeof(*item) + roundup(item->length, 4);
+}
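A hedged sketch of walking the TLV series with the helper above, stopping at the SERIES_DONE marker; a real handler would additionally bound the walk by the received message length. The function name is illustrative:

static void example_walk_stats_conf(struct htt_stats_conf *ev)
{
	struct htt_stats_conf_item *item = ev->items;
	u8 status, type;

	for (;;) {
		status = (item->info & HTT_STATS_CONF_ITEM_INFO_STATUS_MASK) >>
			 HTT_STATS_CONF_ITEM_INFO_STATUS_LSB;
		if (status == HTT_DBG_STATS_STATUS_SERIES_DONE)
			break;

		type = (item->info & HTT_STATS_CONF_ITEM_INFO_STAT_TYPE_MASK) >>
		       HTT_STATS_CONF_ITEM_INFO_STAT_TYPE_LSB;
		pr_debug("stats item: type %d status %d len %d\n",
			 type, status, __le16_to_cpu(item->length));

		item = htt_stats_conf_next_item(item);
	}
}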
+/*
+ * host -> target FRAG DESCRIPTOR/MSDU_EXT DESC bank
+ *
+ * The following field definitions describe the format of the HTT host
+ * to target frag_desc/msdu_ext bank configuration message.
+ * The message contains the base address and the min and max id of the
+ * MSDU_EXT/FRAG_DESC that will be used by the HTT to map MSDU DESC and
+ * MSDU_EXT/FRAG_DESC.
+ * HTT will use the id in the HTT descriptor instead of sending the frag_desc_ptr.
+ * For QCA988X HW the firmware will use fragment_desc_ptr but in WIFI2.0
+ * the hardware does the mapping/translation.
+ *
+ * The total number of banks that can be configured is 16.
+ *
+ * This should be called before any TX has been initiated by the HTT.
+ *
+ * |31                         16|15           8|7   5|4       0|
+ * |------------------------------------------------------------|
+ * | DESC_SIZE    |  NUM_BANKS   | RES |SWP|pdev|    msg type   |
+ * |------------------------------------------------------------|
+ * |                     BANK0_BASE_ADDRESS                     |
+ * |------------------------------------------------------------|
+ * |                            ...                             |
+ * |------------------------------------------------------------|
+ * |                    BANK15_BASE_ADDRESS                     |
+ * |------------------------------------------------------------|
+ * |       BANK0_MAX_ID          |       BANK0_MIN_ID           |
+ * |------------------------------------------------------------|
+ * |                            ...                             |
+ * |------------------------------------------------------------|
+ * |       BANK15_MAX_ID         |       BANK15_MIN_ID          |
+ * |------------------------------------------------------------|
+ * Header fields:
+ *  - MSG_TYPE
+ *    Bits 7:0
+ *    Value: 0x6
+ *  - BANKx_BASE_ADDRESS
+ *    Bits 31:0
+ *    Purpose: Provide a mechanism to specify the base address of the MSDU_EXT
+ *         bank physical/bus address.
+ *  - BANKx_MIN_ID
+ *    Bits 15:0
+ *    Purpose: Provide a mechanism to specify the min index that needs to
+ *          be mapped.
+ *  - BANKx_MAX_ID
+ *    Bits 31:16
+ *    Purpose: Provide a mechanism to specify the max index that needs to
+ *          be mapped.
+ */
+struct htt_frag_desc_bank_id {
+       __le16 bank_min_id;
+       __le16 bank_max_id;
+} __packed;
+
+/* real is 16 but it wouldn't fit in the max htt message size
+ * so we use a conservatively safe value for now */
+#define HTT_FRAG_DESC_BANK_MAX 4
+
+#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK 0x03
+#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_LSB  0
+#define HTT_FRAG_DESC_BANK_CFG_INFO_SWAP         (1 << 2)
+
+struct htt_frag_desc_bank_cfg {
+       u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
+       u8 num_banks;
+       u8 desc_size;
+       __le32 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX];
+       struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX];
+} __packed;
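A hedged sketch of how the host side might fill a single-bank configuration before sending this message; base_paddr, num_descs and the function name are illustrative assumptions rather than anything defined in this patch:

static void example_fill_bank_cfg(struct htt_frag_desc_bank_cfg *cfg,
				  u32 base_paddr, u16 num_descs, u8 desc_size)
{
	cfg->info = 0; /* pdev id 0, no byte swapping */
	cfg->num_banks = 1;
	cfg->desc_size = desc_size;
	cfg->bank_base_addrs[0] = __cpu_to_le32(base_paddr);
	cfg->bank_id[0].bank_min_id = __cpu_to_le16(0);
	cfg->bank_id[0].bank_max_id = __cpu_to_le16(num_descs - 1);
}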
+
+union htt_rx_pn_t {
+       /* WEP: 24-bit PN */
+       u32 pn24;
+
+       /* TKIP or CCMP: 48-bit PN */
+       u_int64_t pn48;
+
+       /* WAPI: 128-bit PN */
+       u_int64_t pn128[2];
+};
+
+struct htt_cmd {
+       struct htt_cmd_hdr hdr;
+       union {
+               struct htt_ver_req ver_req;
+               struct htt_mgmt_tx_desc mgmt_tx;
+               struct htt_data_tx_desc data_tx;
+               struct htt_rx_ring_setup rx_setup;
+               struct htt_stats_req stats_req;
+               struct htt_oob_sync_req oob_sync_req;
+               struct htt_aggr_conf aggr_conf;
+               struct htt_frag_desc_bank_cfg frag_desc_bank_cfg;
+       };
+} __packed;
+
+struct htt_resp {
+       struct htt_resp_hdr hdr;
+       union {
+               struct htt_ver_resp ver_resp;
+               struct htt_mgmt_tx_completion mgmt_tx_completion;
+               struct htt_data_tx_completion data_tx_completion;
+               struct htt_rx_indication rx_ind;
+               struct htt_rx_fragment_indication rx_frag_ind;
+               struct htt_rx_peer_map peer_map;
+               struct htt_rx_peer_unmap peer_unmap;
+               struct htt_rx_flush rx_flush;
+               struct htt_rx_addba rx_addba;
+               struct htt_rx_delba rx_delba;
+               struct htt_security_indication security_indication;
+               struct htt_rc_update rc_update;
+               struct htt_rx_test rx_test;
+               struct htt_pktlog_msg pktlog_msg;
+               struct htt_stats_conf stats_conf;
+       };
+} __packed;
+
+
+/*** host side structures follow ***/
+
+struct htt_tx_done {
+       u32 msdu_id;
+       bool discard;
+       bool no_ack;
+};
+
+struct htt_peer_map_event {
+       u8 vdev_id;
+       u16 peer_id;
+       u8 addr[ETH_ALEN];
+};
+
+struct htt_peer_unmap_event {
+       u16 peer_id;
+};
+
+struct htt_rx_info {
+       struct sk_buff *skb;
+       enum htt_rx_mpdu_status status;
+       enum htt_rx_mpdu_encrypt_type encrypt_type;
+       s8 signal;
+       struct {
+               u8 info0;
+               u32 info1;
+               u32 info2;
+       } rate;
+       bool fcs_err;
+};
+
+struct ath10k_htt {
+       struct ath10k *ar;
+       enum ath10k_htc_ep_id eid;
+
+       int max_throughput_mbps;
+       u8 target_version_major;
+       u8 target_version_minor;
+       struct completion target_version_received;
+
+       struct {
+               /*
+                * Ring of network buffer objects - This ring is
+                * used exclusively by the host SW. This ring
+                * mirrors the dev_addrs_ring that is shared
+                * between the host SW and the MAC HW. The host SW
+                * uses this netbufs ring to locate the network
+                * buffer objects whose data buffers the HW has
+                * filled.
+                */
+               struct sk_buff **netbufs_ring;
+               /*
+                * Ring of buffer addresses -
+                * This ring holds the "physical" device address of the
+                * rx buffers the host SW provides for the MAC HW to
+                * fill.
+                */
+               __le32 *paddrs_ring;
+
+               /*
+                * Base address of ring, as a "physical" device address
+                * rather than a CPU address.
+                */
+               dma_addr_t base_paddr;
+
+               /* how many elems in the ring (power of 2) */
+               int size;
+
+               /* size - 1 */
+               unsigned size_mask;
+
+               /* how many rx buffers to keep in the ring */
+               int fill_level;
+
+               /* how many rx buffers (full+empty) are in the ring */
+               int fill_cnt;
+
+               /*
+                * alloc_idx - where HTT SW has deposited empty buffers
+                * This is allocated in consistent mem, so that the FW can
+                * read this variable, and program the HW's FW_IDX reg with
+                * the value of this shadow register.
+                */
+               struct {
+                       __le32 *vaddr;
+                       dma_addr_t paddr;
+               } alloc_idx;
+
+               /* where HTT SW has processed bufs filled by rx MAC DMA */
+               struct {
+                       unsigned msdu_payld;
+               } sw_rd_idx;
+
+               /*
+                * refill_retry_timer - timer triggered when the ring is
+                * not refilled to the level expected
+                */
+               struct timer_list refill_retry_timer;
+
+               /* Protects access to all rx ring buffer state variables */
+               spinlock_t lock;
+       } rx_ring;
+
+       unsigned int prefetch_len;
+
+       /* Protects access to %pending_tx, %used_msdu_ids */
+       spinlock_t tx_lock;
+       int max_num_pending_tx;
+       int num_pending_tx;
+       struct sk_buff **pending_tx;
+       unsigned long *used_msdu_ids; /* bitmap */
+       wait_queue_head_t empty_tx_wq;
+
+       /* set if host-fw communication goes haywire
+        * used to avoid further failures */
+       bool rx_confused;
+};
+
+#define RX_HTT_HDR_STATUS_LEN 64
+
+/* This structure layout is programmed via rx ring setup
+ * so that FW knows how to transfer the rx descriptor to the host.
+ * Buffers like this are placed on the rx ring. */
+struct htt_rx_desc {
+       union {
+               /* This field is filled on the host using the msdu buffer
+                * from htt_rx_indication */
+               struct fw_rx_desc_base fw_desc;
+               u32 pad;
+       } __packed;
+       struct {
+               struct rx_attention attention;
+               struct rx_frag_info frag_info;
+               struct rx_mpdu_start mpdu_start;
+               struct rx_msdu_start msdu_start;
+               struct rx_msdu_end msdu_end;
+               struct rx_mpdu_end mpdu_end;
+               struct rx_ppdu_start ppdu_start;
+               struct rx_ppdu_end ppdu_end;
+       } __packed;
+       u8 rx_hdr_status[RX_HTT_HDR_STATUS_LEN];
+       u8 msdu_payload[0];
+};
+
+#define HTT_RX_DESC_ALIGN 8
+
+#define HTT_MAC_ADDR_LEN 6
+
+/*
+ * FIX THIS
+ * Should be: sizeof(struct htt_host_rx_desc) + max rx MSDU size,
+ * rounded up to a cache line size.
+ */
+#define HTT_RX_BUF_SIZE 1920
+#define HTT_RX_MSDU_SIZE (HTT_RX_BUF_SIZE - (int)sizeof(struct htt_rx_desc))
+
+/*
+ * DMA_MAP expects the buffer to be an integral number of cache lines.
+ * Rather than checking the actual cache line size, this code makes a
+ * conservative estimate of what the cache line size could be.
+ */
+#define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */
+#define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)
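The mask above is intended for conservative cache-line rounding of buffer lengths; a minimal, illustrative helper (not part of the patch) would look like:

static inline int example_round_to_cache_line(int len)
{
	/* pad up to the conservative 128-byte cache line estimate */
	return (len + HTT_MAX_CACHE_LINE_SIZE_MASK) &
	       ~HTT_MAX_CACHE_LINE_SIZE_MASK;
}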
+
+struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar);
+int ath10k_htt_attach_target(struct ath10k_htt *htt);
+void ath10k_htt_detach(struct ath10k_htt *htt);
+
+int ath10k_htt_tx_attach(struct ath10k_htt *htt);
+void ath10k_htt_tx_detach(struct ath10k_htt *htt);
+int ath10k_htt_rx_attach(struct ath10k_htt *htt);
+void ath10k_htt_rx_detach(struct ath10k_htt *htt);
+void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
+int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt);
+
+void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
+int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt);
+void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
+int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *);
+int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *);
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
new file mode 100644 (file)
index 0000000..de058d7
--- /dev/null
@@ -0,0 +1,1167 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "htc.h"
+#include "htt.h"
+#include "txrx.h"
+#include "debug.h"
+
+#include <linux/log2.h>
+
+/* slightly larger than one large A-MPDU */
+#define HTT_RX_RING_SIZE_MIN 128
+
+/* roughly 20 ms @ 1 Gbps of 1500B MSDUs */
+#define HTT_RX_RING_SIZE_MAX 2048
+
+#define HTT_RX_AVG_FRM_BYTES 1000
+
+/* ms, very conservative */
+#define HTT_RX_HOST_LATENCY_MAX_MS 20
+
+/* ms, conservative */
+#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10
+
+/* when under memory pressure rx ring refill may fail and needs a retry */
+#define HTT_RX_RING_REFILL_RETRY_MS 50
+
+static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
+{
+       int size;
+
+       /*
+        * It is expected that the host CPU will typically be able to
+        * service the rx indication from one A-MPDU before the rx
+        * indication from the subsequent A-MPDU happens, roughly 1-2 ms
+        * later. However, the rx ring should be sized very conservatively,
+        * to accommodate the worst reasonable delay before the host CPU
+        * services an rx indication interrupt.
+        *
+        * The rx ring need not be kept full of empty buffers. In theory,
+        * the htt host SW can dynamically track the low-water mark in the
+        * rx ring, and adjust the level to which the ring is filled with
+        * empty buffers to meet the desired low-water mark.
+        *
+        * In contrast, it's difficult to resize the rx ring itself, once
+        * it's in use. Thus, the ring itself should be sized very
+        * conservatively, while the degree to which the ring is filled
+        * with empty buffers should be sized moderately conservatively.
+        */
+
+       /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
+       size =
+           htt->max_throughput_mbps *
+           1000  /
+           (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;
+
+       if (size < HTT_RX_RING_SIZE_MIN)
+               size = HTT_RX_RING_SIZE_MIN;
+
+       if (size > HTT_RX_RING_SIZE_MAX)
+               size = HTT_RX_RING_SIZE_MAX;
+
+       size = roundup_pow_of_two(size);
+
+       return size;
+}
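As a worked example of the sizing formula above (assuming max_throughput_mbps = 800): 800 * 1000 / (8 * 1000) * 20 = 2000 entries, which falls inside the [HTT_RX_RING_SIZE_MIN, HTT_RX_RING_SIZE_MAX] clamp and is then rounded up by roundup_pow_of_two() to 2048.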
+
+static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt)
+{
+       int size;
+
+       /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
+       size =
+           htt->max_throughput_mbps *
+           1000  /
+           (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;
+
+       /*
+        * Make sure the fill level is at least 1 less than the ring size.
+        * Leaving 1 element empty allows the SW to easily distinguish
+        * between a full ring vs. an empty ring.
+        */
+       if (size >= htt->rx_ring.size)
+               size = htt->rx_ring.size - 1;
+
+       return size;
+}
+
+static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
+{
+       struct sk_buff *skb;
+       struct ath10k_skb_cb *cb;
+       int i;
+
+       for (i = 0; i < htt->rx_ring.fill_cnt; i++) {
+               skb = htt->rx_ring.netbufs_ring[i];
+               cb = ATH10K_SKB_CB(skb);
+               dma_unmap_single(htt->ar->dev, cb->paddr,
+                                skb->len + skb_tailroom(skb),
+                                DMA_FROM_DEVICE);
+               dev_kfree_skb_any(skb);
+       }
+
+       htt->rx_ring.fill_cnt = 0;
+}
+
+static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
+{
+       struct htt_rx_desc *rx_desc;
+       struct sk_buff *skb;
+       dma_addr_t paddr;
+       int ret = 0, idx;
+
+       idx = __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr));
+       while (num > 0) {
+               skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
+               if (!skb) {
+                       ret = -ENOMEM;
+                       goto fail;
+               }
+
+               if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
+                       skb_pull(skb,
+                                PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
+                                skb->data);
+
+               /* Clear rx_desc attention word before posting to Rx ring */
+               rx_desc = (struct htt_rx_desc *)skb->data;
+               rx_desc->attention.flags = __cpu_to_le32(0);
+
+               paddr = dma_map_single(htt->ar->dev, skb->data,
+                                      skb->len + skb_tailroom(skb),
+                                      DMA_FROM_DEVICE);
+
+               if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
+                       dev_kfree_skb_any(skb);
+                       ret = -ENOMEM;
+                       goto fail;
+               }
+
+               ATH10K_SKB_CB(skb)->paddr = paddr;
+               htt->rx_ring.netbufs_ring[idx] = skb;
+               htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
+               htt->rx_ring.fill_cnt++;
+
+               num--;
+               idx++;
+               idx &= htt->rx_ring.size_mask;
+       }
+
+fail:
+       *(htt->rx_ring.alloc_idx.vaddr) = __cpu_to_le32(idx);
+       return ret;
+}
+
+static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
+{
+       lockdep_assert_held(&htt->rx_ring.lock);
+       return __ath10k_htt_rx_ring_fill_n(htt, num);
+}
+
+static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
+{
+       int ret, num_to_fill;
+
+       spin_lock_bh(&htt->rx_ring.lock);
+       num_to_fill = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
+       ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
+       if (ret == -ENOMEM) {
+               /*
+                * Failed to fill it to the desired level -
+                * we'll start a timer and try again next time.
+                * As long as enough buffers are left in the ring for
+                * another A-MPDU rx, no special recovery is needed.
+                */
+               mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
+                         msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
+       }
+       spin_unlock_bh(&htt->rx_ring.lock);
+}
+
+static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
+{
+       struct ath10k_htt *htt = (struct ath10k_htt *)arg;
+       ath10k_htt_rx_msdu_buff_replenish(htt);
+}
+
+static unsigned ath10k_htt_rx_ring_elems(struct ath10k_htt *htt)
+{
+       return (__le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr) -
+               htt->rx_ring.sw_rd_idx.msdu_payld) & htt->rx_ring.size_mask;
+}
+
+void ath10k_htt_rx_detach(struct ath10k_htt *htt)
+{
+       int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;
+
+       del_timer_sync(&htt->rx_ring.refill_retry_timer);
+
+       while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
+               struct sk_buff *skb =
+                               htt->rx_ring.netbufs_ring[sw_rd_idx];
+               struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
+
+               dma_unmap_single(htt->ar->dev, cb->paddr,
+                                skb->len + skb_tailroom(skb),
+                                DMA_FROM_DEVICE);
+               dev_kfree_skb_any(htt->rx_ring.netbufs_ring[sw_rd_idx]);
+               sw_rd_idx++;
+               sw_rd_idx &= htt->rx_ring.size_mask;
+       }
+
+       dma_free_coherent(htt->ar->dev,
+                         (htt->rx_ring.size *
+                          sizeof(htt->rx_ring.paddrs_ring)),
+                         htt->rx_ring.paddrs_ring,
+                         htt->rx_ring.base_paddr);
+
+       dma_free_coherent(htt->ar->dev,
+                         sizeof(*htt->rx_ring.alloc_idx.vaddr),
+                         htt->rx_ring.alloc_idx.vaddr,
+                         htt->rx_ring.alloc_idx.paddr);
+
+       kfree(htt->rx_ring.netbufs_ring);
+}
+
+static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
+{
+       int idx;
+       struct sk_buff *msdu;
+
+       spin_lock_bh(&htt->rx_ring.lock);
+
+       if (ath10k_htt_rx_ring_elems(htt) == 0)
+               ath10k_warn("htt rx ring is empty!\n");
+
+       idx = htt->rx_ring.sw_rd_idx.msdu_payld;
+       msdu = htt->rx_ring.netbufs_ring[idx];
+
+       idx++;
+       idx &= htt->rx_ring.size_mask;
+       htt->rx_ring.sw_rd_idx.msdu_payld = idx;
+       htt->rx_ring.fill_cnt--;
+
+       spin_unlock_bh(&htt->rx_ring.lock);
+       return msdu;
+}
+
+static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
+{
+       struct sk_buff *next;
+
+       while (skb) {
+               next = skb->next;
+               dev_kfree_skb_any(skb);
+               skb = next;
+       }
+}
+
+static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
+                                  u8 **fw_desc, int *fw_desc_len,
+                                  struct sk_buff **head_msdu,
+                                  struct sk_buff **tail_msdu)
+{
+       int msdu_len, msdu_chaining = 0;
+       struct sk_buff *msdu;
+       struct htt_rx_desc *rx_desc;
+
+       if (ath10k_htt_rx_ring_elems(htt) == 0)
+               ath10k_warn("htt rx ring is empty!\n");
+
+       if (htt->rx_confused) {
+               ath10k_warn("htt is confused. refusing rx\n");
+               return 0;
+       }
+
+       msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt);
+       while (msdu) {
+               int last_msdu, msdu_len_invalid, msdu_chained;
+
+               dma_unmap_single(htt->ar->dev,
+                                ATH10K_SKB_CB(msdu)->paddr,
+                                msdu->len + skb_tailroom(msdu),
+                                DMA_FROM_DEVICE);
+
+               ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ",
+                               msdu->data, msdu->len + skb_tailroom(msdu));
+
+               rx_desc = (struct htt_rx_desc *)msdu->data;
+
+               /* FIXME: we must report msdu payload since this is what caller
+                *        expects now */
+               skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
+               skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));
+
+               /*
+                * Sanity check - confirm the HW is finished filling in the
+                * rx data.
+                * If the HW and SW are working correctly, then it's guaranteed
+                * that the HW's MAC DMA is done before this point in the SW.
+                * To prevent the case that we handle a stale Rx descriptor,
+                * just assert for now until we have a way to recover.
+                */
+               if (!(__le32_to_cpu(rx_desc->attention.flags)
+                               & RX_ATTENTION_FLAGS_MSDU_DONE)) {
+                       ath10k_htt_rx_free_msdu_chain(*head_msdu);
+                       *head_msdu = NULL;
+                       msdu = NULL;
+                       ath10k_err("htt rx stopped. cannot recover\n");
+                       htt->rx_confused = true;
+                       break;
+               }
+
+               /*
+                * Copy the FW rx descriptor for this MSDU from the rx
+                * indication message into the MSDU's netbuf. HL uses the
+                * same rx indication message definition as LL, and simply
+                * appends new info (fields from the HW rx desc, and the
+                * MSDU payload itself). So, the offset into the rx
+                * indication message only has to account for the standard
+                * offset of the per-MSDU FW rx desc info within the
+                * message, and how many bytes of the per-MSDU FW rx desc
+                * info have already been consumed. (And the endianness of
+                * the host, since for a big-endian host, the rx ind
+                * message contents, including the per-MSDU rx desc bytes,
+                * were byteswapped during upload.)
+                */
+               if (*fw_desc_len > 0) {
+                       rx_desc->fw_desc.info0 = **fw_desc;
+                       /*
+                        * The target is expected to only provide the basic
+                        * per-MSDU rx descriptors. Just to be sure, verify
+                        * that the target has not attached extension data
+                        * (e.g. LRO flow ID).
+                        */
+
+                       /* or more, if there's extension data */
+                       (*fw_desc)++;
+                       (*fw_desc_len)--;
+               } else {
+                       /*
+                        * When an oversized A-MSDU is received, the FW
+                        * will lose some of the MSDU status - in this
+                        * case, fewer FW descriptors are provided than
+                        * there are MSDUs in the MPDU. Mark the FW
+                        * descriptors so the frames are still delivered
+                        * to the upper stack if the MPDU has no CRC error.
+                        *
+                        * FIX THIS - the FW descriptors are actually for
+                        * MSDUs at the end of this A-MSDU instead of the
+                        * beginning.
+                        */
+                       rx_desc->fw_desc.info0 = 0;
+               }
+
+               msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
+                                       & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
+                                          RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
+               msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
+                             RX_MSDU_START_INFO0_MSDU_LENGTH);
+               msdu_chained = rx_desc->frag_info.ring2_more_count;
+
+               if (msdu_len_invalid)
+                       msdu_len = 0;
+
+               skb_trim(msdu, 0);
+               skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
+               msdu_len -= msdu->len;
+
+               /* FIXME: Do chained buffers include htt_rx_desc or not? */
+               while (msdu_chained--) {
+                       struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);
+
+                       dma_unmap_single(htt->ar->dev,
+                                        ATH10K_SKB_CB(next)->paddr,
+                                        next->len + skb_tailroom(next),
+                                        DMA_FROM_DEVICE);
+
+                       ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ",
+                                       next->data,
+                                       next->len + skb_tailroom(next));
+
+                       skb_trim(next, 0);
+                       skb_put(next, min(msdu_len, HTT_RX_BUF_SIZE));
+                       msdu_len -= next->len;
+
+                       msdu->next = next;
+                       msdu = next;
+                       msdu_chaining = 1;
+               }
+
+               if (msdu_len > 0) {
+                       /* This may suggest a FW bug */
+                       ath10k_warn("htt rx msdu len not consumed (%d)\n",
+                                   msdu_len);
+               }
+
+               last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
+                               RX_MSDU_END_INFO0_LAST_MSDU;
+
+               if (last_msdu) {
+                       msdu->next = NULL;
+                       break;
+               } else {
+                       struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);
+                       msdu->next = next;
+                       msdu = next;
+               }
+       }
+       *tail_msdu = msdu;
+
+       /*
+        * Don't refill the ring yet.
+        *
+        * First, the elements popped here are still in use - it is not
+        * safe to overwrite them until the matching call to
+        * mpdu_desc_list_next. Second, for efficiency it is preferable to
+        * refill the rx ring with 1 PPDU's worth of rx buffers (something
+        * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
+        * (something like 3 buffers). Consequently, we'll rely on the txrx
+        * SW to tell us when it is done pulling all the PPDU's rx buffers
+        * out of the rx ring, and then refill it just once.
+        */
+
+       return msdu_chaining;
+}
+
+int ath10k_htt_rx_attach(struct ath10k_htt *htt)
+{
+       dma_addr_t paddr;
+       void *vaddr;
+       struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
+
+       htt->rx_ring.size = ath10k_htt_rx_ring_size(htt);
+       if (!is_power_of_2(htt->rx_ring.size)) {
+               ath10k_warn("htt rx ring size is not power of 2\n");
+               return -EINVAL;
+       }
+
+       htt->rx_ring.size_mask = htt->rx_ring.size - 1;
+
+       /*
+        * Set the initial value for the level to which the rx ring
+        * should be filled, based on the max throughput and the
+        * worst likely latency for the host to fill the rx ring
+        * with new buffers. In theory, this fill level can be
+        * dynamically adjusted from the initial value set here, to
+        * reflect the actual host latency rather than a
+        * conservative assumption about the host latency.
+        */
+       htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);
+
+       htt->rx_ring.netbufs_ring =
+               kmalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
+                       GFP_KERNEL);
+       if (!htt->rx_ring.netbufs_ring)
+               goto err_netbuf;
+
+       vaddr = dma_alloc_coherent(htt->ar->dev,
+                  (htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring)),
+                  &paddr, GFP_DMA);
+       if (!vaddr)
+               goto err_dma_ring;
+
+       htt->rx_ring.paddrs_ring = vaddr;
+       htt->rx_ring.base_paddr = paddr;
+
+       vaddr = dma_alloc_coherent(htt->ar->dev,
+                                  sizeof(*htt->rx_ring.alloc_idx.vaddr),
+                                  &paddr, GFP_DMA);
+       if (!vaddr)
+               goto err_dma_idx;
+
+       htt->rx_ring.alloc_idx.vaddr = vaddr;
+       htt->rx_ring.alloc_idx.paddr = paddr;
+       htt->rx_ring.sw_rd_idx.msdu_payld = 0;
+       *htt->rx_ring.alloc_idx.vaddr = 0;
+
+       /* Initialize the Rx refill retry timer */
+       setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);
+
+       spin_lock_init(&htt->rx_ring.lock);
+
+       htt->rx_ring.fill_cnt = 0;
+       if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
+               goto err_fill_ring;
+
+       ath10k_dbg(ATH10K_DBG_HTT, "HTT RX ring size: %d, fill_level: %d\n",
+                  htt->rx_ring.size, htt->rx_ring.fill_level);
+       return 0;
+
+err_fill_ring:
+       ath10k_htt_rx_ring_free(htt);
+       dma_free_coherent(htt->ar->dev,
+                         sizeof(*htt->rx_ring.alloc_idx.vaddr),
+                         htt->rx_ring.alloc_idx.vaddr,
+                         htt->rx_ring.alloc_idx.paddr);
+err_dma_idx:
+       dma_free_coherent(htt->ar->dev,
+                         (htt->rx_ring.size *
+                          sizeof(htt->rx_ring.paddrs_ring)),
+                         htt->rx_ring.paddrs_ring,
+                         htt->rx_ring.base_paddr);
+err_dma_ring:
+       kfree(htt->rx_ring.netbufs_ring);
+err_netbuf:
+       return -ENOMEM;
+}
+
+static int ath10k_htt_rx_crypto_param_len(enum htt_rx_mpdu_encrypt_type type)
+{
+       switch (type) {
+       case HTT_RX_MPDU_ENCRYPT_WEP40:
+       case HTT_RX_MPDU_ENCRYPT_WEP104:
+               return 4;
+       case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
+       case HTT_RX_MPDU_ENCRYPT_WEP128: /* not tested */
+       case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
+       case HTT_RX_MPDU_ENCRYPT_WAPI: /* not tested */
+       case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
+               return 8;
+       case HTT_RX_MPDU_ENCRYPT_NONE:
+               return 0;
+       }
+
+       ath10k_warn("unknown encryption type %d\n", type);
+       return 0;
+}
+
+static int ath10k_htt_rx_crypto_tail_len(enum htt_rx_mpdu_encrypt_type type)
+{
+       switch (type) {
+       case HTT_RX_MPDU_ENCRYPT_NONE:
+       case HTT_RX_MPDU_ENCRYPT_WEP40:
+       case HTT_RX_MPDU_ENCRYPT_WEP104:
+       case HTT_RX_MPDU_ENCRYPT_WEP128:
+       case HTT_RX_MPDU_ENCRYPT_WAPI:
+               return 0;
+       case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
+       case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
+               return 4;
+       case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
+               return 8;
+       }
+
+       ath10k_warn("unknown encryption type %d\n", type);
+       return 0;
+}
+
+/* Applies for first msdu in chain, before altering it. */
+static struct ieee80211_hdr *ath10k_htt_rx_skb_get_hdr(struct sk_buff *skb)
+{
+       struct htt_rx_desc *rxd;
+       enum rx_msdu_decap_format fmt;
+
+       rxd = (void *)skb->data - sizeof(*rxd);
+       fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
+                       RX_MSDU_START_INFO1_DECAP_FORMAT);
+
+       if (fmt == RX_MSDU_DECAP_RAW)
+               return (void *)skb->data;
+       else
+               return (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
+}
+
+/* This function only applies for first msdu in an msdu chain */
+static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
+{
+       if (ieee80211_is_data_qos(hdr->frame_control)) {
+               u8 *qc = ieee80211_get_qos_ctl(hdr);
+               if (qc[0] & 0x80)
+                       return true;
+       }
+       return false;
+}
+
+static int ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
+                       struct htt_rx_info *info)
+{
+       struct htt_rx_desc *rxd;
+       struct sk_buff *amsdu;
+       struct sk_buff *first;
+       struct ieee80211_hdr *hdr;
+       struct sk_buff *skb = info->skb;
+       enum rx_msdu_decap_format fmt;
+       enum htt_rx_mpdu_encrypt_type enctype;
+       unsigned int hdr_len;
+       int crypto_len;
+
+       rxd = (void *)skb->data - sizeof(*rxd);
+       fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
+                       RX_MSDU_START_INFO1_DECAP_FORMAT);
+       enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+                       RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+
+       /* FIXME: No idea what assumptions are safe here. Need logs */
+       if ((fmt == RX_MSDU_DECAP_RAW && skb->next) ||
+           (fmt == RX_MSDU_DECAP_8023_SNAP_LLC)) {
+               ath10k_htt_rx_free_msdu_chain(skb->next);
+               skb->next = NULL;
+               return -ENOTSUPP;
+       }
+
+       /* A-MSDU max is a little less than 8K */
+       amsdu = dev_alloc_skb(8*1024);
+       if (!amsdu) {
+               ath10k_warn("A-MSDU allocation failed\n");
+               ath10k_htt_rx_free_msdu_chain(skb->next);
+               skb->next = NULL;
+               return -ENOMEM;
+       }
+
+       if (fmt >= RX_MSDU_DECAP_NATIVE_WIFI) {
+               int hdrlen;
+
+               hdr = (void *)rxd->rx_hdr_status;
+               hdrlen = ieee80211_hdrlen(hdr->frame_control);
+               memcpy(skb_put(amsdu, hdrlen), hdr, hdrlen);
+       }
+
+       first = skb;
+       while (skb) {
+               void *decap_hdr;
+               int decap_len = 0;
+
+               rxd = (void *)skb->data - sizeof(*rxd);
+               fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
+                               RX_MSDU_START_INFO1_DECAP_FORMAT);
+               decap_hdr = (void *)rxd->rx_hdr_status;
+
+               if (skb == first) {
+                       /* We receive linked A-MSDU subframe skbuffs. The
+                        * first one contains the original 802.11 header (and
+                        * possible crypto param) in the RX descriptor. The
+                        * A-MSDU subframe header follows that. Each part is
+                        * aligned to 4 byte boundary. */
+
+                       hdr = (void *)amsdu->data;
+                       hdr_len = ieee80211_hdrlen(hdr->frame_control);
+                       crypto_len = ath10k_htt_rx_crypto_param_len(enctype);
+
+                       decap_hdr += roundup(hdr_len, 4);
+                       decap_hdr += roundup(crypto_len, 4);
+               }
+
+               if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
+                       /* Ethernet2 decap inserts ethernet header in place of
+                        * A-MSDU subframe header. */
+                       skb_pull(skb, 6 + 6 + 2);
+
+                       /* A-MSDU subframe header length */
+                       decap_len += 6 + 6 + 2;
+
+                       /* Ethernet2 decap also strips the LLC/SNAP so we need
+                        * to re-insert it. The LLC/SNAP follows the A-MSDU
+                        * subframe header. */
+                       /* FIXME: Not all LLCs are 8 bytes long */
+                       decap_len += 8;
+
+                       memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
+               }
+
+               if (fmt == RX_MSDU_DECAP_NATIVE_WIFI) {
+                       /* Native Wifi decap inserts regular 802.11 header
+                        * in place of A-MSDU subframe header. */
+                       hdr = (struct ieee80211_hdr *)skb->data;
+                       skb_pull(skb, ieee80211_hdrlen(hdr->frame_control));
+
+                       /* A-MSDU subframe header length */
+                       decap_len += 6 + 6 + 2;
+
+                       memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
+               }
+
+               if (fmt == RX_MSDU_DECAP_RAW)
+                       skb_trim(skb, skb->len - 4); /* remove FCS */
+
+               memcpy(skb_put(amsdu, skb->len), skb->data, skb->len);
+
+               /* A-MSDU subframes are padded to 4-byte boundaries,
+                * but relative to the first subframe, not the whole MPDU */
+               if (skb->next && ((decap_len + skb->len) & 3)) {
+                       int padlen = 4 - ((decap_len + skb->len) & 3);
+                       memset(skb_put(amsdu, padlen), 0, padlen);
+               }
+
+               skb = skb->next;
+       }
+
+       info->skb = amsdu;
+       info->encrypt_type = enctype;
+
+       ath10k_htt_rx_free_msdu_chain(first);
+
+       return 0;
+}
+
+static int ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
+{
+       struct sk_buff *skb = info->skb;
+       struct htt_rx_desc *rxd;
+       struct ieee80211_hdr *hdr;
+       enum rx_msdu_decap_format fmt;
+       enum htt_rx_mpdu_encrypt_type enctype;
+
+       /* This shouldn't happen. If it does then it may be a FW bug. */
+       if (skb->next) {
+               ath10k_warn("received chained non A-MSDU frame\n");
+               ath10k_htt_rx_free_msdu_chain(skb->next);
+               skb->next = NULL;
+       }
+
+       rxd = (void *)skb->data - sizeof(*rxd);
+       fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
+                       RX_MSDU_START_INFO1_DECAP_FORMAT);
+       enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+                       RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+       hdr = (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
+
+       switch (fmt) {
+       case RX_MSDU_DECAP_RAW:
+               /* remove trailing FCS */
+               skb_trim(skb, skb->len - 4);
+               break;
+       case RX_MSDU_DECAP_NATIVE_WIFI:
+               /* nothing to do here */
+               break;
+       case RX_MSDU_DECAP_ETHERNET2_DIX:
+               /* macaddr[6] + macaddr[6] + ethertype[2] */
+               skb_pull(skb, 6 + 6 + 2);
+               break;
+       case RX_MSDU_DECAP_8023_SNAP_LLC:
+               /* macaddr[6] + macaddr[6] + len[2] */
+               /* we don't need this for non-A-MSDU */
+               skb_pull(skb, 6 + 6 + 2);
+               break;
+       }
+
+       if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
+               void *llc;
+               int llclen;
+
+               llclen = 8;
+               llc  = hdr;
+               llc += roundup(ieee80211_hdrlen(hdr->frame_control), 4);
+               llc += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);
+
+               skb_push(skb, llclen);
+               memcpy(skb->data, llc, llclen);
+       }
+
+       if (fmt >= RX_MSDU_DECAP_ETHERNET2_DIX) {
+               int len = ieee80211_hdrlen(hdr->frame_control);
+               skb_push(skb, len);
+               memcpy(skb->data, hdr, len);
+       }
+
+       info->skb = skb;
+       info->encrypt_type = enctype;
+       return 0;
+}
+
+static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb)
+{
+       struct htt_rx_desc *rxd;
+       u32 flags;
+
+       rxd = (void *)skb->data - sizeof(*rxd);
+       flags = __le32_to_cpu(rxd->attention.flags);
+
+       if (flags & RX_ATTENTION_FLAGS_DECRYPT_ERR)
+               return true;
+
+       return false;
+}
+
+static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb)
+{
+       struct htt_rx_desc *rxd;
+       u32 flags;
+
+       rxd = (void *)skb->data - sizeof(*rxd);
+       flags = __le32_to_cpu(rxd->attention.flags);
+
+       if (flags & RX_ATTENTION_FLAGS_FCS_ERR)
+               return true;
+
+       return false;
+}
+
+static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
+                                 struct htt_rx_indication *rx)
+{
+       struct htt_rx_info info;
+       struct htt_rx_indication_mpdu_range *mpdu_ranges;
+       struct ieee80211_hdr *hdr;
+       int num_mpdu_ranges;
+       int fw_desc_len;
+       u8 *fw_desc;
+       int i, j;
+       int ret;
+
+       memset(&info, 0, sizeof(info));
+
+       fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
+       fw_desc = (u8 *)&rx->fw_desc;
+
+       num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
+                            HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
+       mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
+
+       ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
+                       rx, sizeof(*rx) +
+                       (sizeof(struct htt_rx_indication_mpdu_range) *
+                               num_mpdu_ranges));
+
+       for (i = 0; i < num_mpdu_ranges; i++) {
+               info.status = mpdu_ranges[i].mpdu_range_status;
+
+               for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
+                       struct sk_buff *msdu_head, *msdu_tail;
+                       enum htt_rx_mpdu_status status;
+                       int msdu_chaining;
+
+                       msdu_head = NULL;
+                       msdu_tail = NULL;
+                       msdu_chaining = ath10k_htt_rx_amsdu_pop(htt,
+                                                        &fw_desc,
+                                                        &fw_desc_len,
+                                                        &msdu_head,
+                                                        &msdu_tail);
+
+                       if (!msdu_head) {
+                               ath10k_warn("htt rx no data!\n");
+                               continue;
+                       }
+
+                       if (msdu_head->len == 0) {
+                               ath10k_dbg(ATH10K_DBG_HTT,
+                                          "htt rx dropping due to zero-len\n");
+                               ath10k_htt_rx_free_msdu_chain(msdu_head);
+                               continue;
+                       }
+
+                       if (ath10k_htt_rx_has_decrypt_err(msdu_head)) {
+                               ath10k_htt_rx_free_msdu_chain(msdu_head);
+                               continue;
+                       }
+
+                       status = info.status;
+
+                       /* Skip mgmt frames; these are handled via WMI */
+                       if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL) {
+                               ath10k_htt_rx_free_msdu_chain(msdu_head);
+                               continue;
+                       }
+
+                       if (status != HTT_RX_IND_MPDU_STATUS_OK &&
+                           status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
+                           !htt->ar->monitor_enabled) {
+                               ath10k_dbg(ATH10K_DBG_HTT,
+                                          "htt rx ignoring frame w/ status %d\n",
+                                          status);
+                               ath10k_htt_rx_free_msdu_chain(msdu_head);
+                               continue;
+                       }
+
+                       /* FIXME: we do not support chaining yet.
+                        * this needs investigation */
+                       if (msdu_chaining) {
+                               ath10k_warn("msdu_chaining is true\n");
+                               ath10k_htt_rx_free_msdu_chain(msdu_head);
+                               continue;
+                       }
+
+                       info.skb     = msdu_head;
+                       info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
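+                       /* combined_rssi is reported relative to the noise
+                        * floor, so add the two to get a signal estimate. */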
+                       info.signal  = ATH10K_DEFAULT_NOISE_FLOOR;
+                       info.signal += rx->ppdu.combined_rssi;
+
+                       info.rate.info0 = rx->ppdu.info0;
+                       info.rate.info1 = __le32_to_cpu(rx->ppdu.info1);
+                       info.rate.info2 = __le32_to_cpu(rx->ppdu.info2);
+
+                       hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);
+
+                       if (ath10k_htt_rx_hdr_is_amsdu(hdr))
+                               ret = ath10k_htt_rx_amsdu(htt, &info);
+                       else
+                               ret = ath10k_htt_rx_msdu(htt, &info);
+
+                       if (ret && !info.fcs_err) {
+                               ath10k_warn("error processing msdus %d\n", ret);
+                               dev_kfree_skb_any(info.skb);
+                               continue;
+                       }
+
+                       if (ath10k_htt_rx_hdr_is_amsdu((void *)info.skb->data))
+                               ath10k_dbg(ATH10K_DBG_HTT, "htt mpdu is amsdu\n");
+
+                       ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt mpdu: ",
+                                       info.skb->data, info.skb->len);
+                       ath10k_process_rx(htt->ar, &info);
+               }
+       }
+
+       ath10k_htt_rx_msdu_buff_replenish(htt);
+}
+
+static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
+                               struct htt_rx_fragment_indication *frag)
+{
+       struct sk_buff *msdu_head, *msdu_tail;
+       struct htt_rx_desc *rxd;
+       enum rx_msdu_decap_format fmt;
+       struct htt_rx_info info = {};
+       struct ieee80211_hdr *hdr;
+       int msdu_chaining;
+       bool tkip_mic_err;
+       bool decrypt_err;
+       u8 *fw_desc;
+       int fw_desc_len, hdrlen, paramlen;
+       int trim;
+
+       fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
+       fw_desc = (u8 *)frag->fw_msdu_rx_desc;
+
+       msdu_head = NULL;
+       msdu_tail = NULL;
+       msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
+                                               &msdu_head, &msdu_tail);
+
+       ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
+
+       if (!msdu_head) {
+               ath10k_warn("htt rx frag no data\n");
+               return;
+       }
+
+       if (msdu_chaining || msdu_head != msdu_tail) {
+               ath10k_warn("aggregation with fragmentation?!\n");
+               ath10k_htt_rx_free_msdu_chain(msdu_head);
+               return;
+       }
+
+       /* FIXME: implement signal strength */
+
+       hdr = (struct ieee80211_hdr *)msdu_head->data;
+       rxd = (void *)msdu_head->data - sizeof(*rxd);
+       tkip_mic_err = !!(__le32_to_cpu(rxd->attention.flags) &
+                               RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
+       decrypt_err = !!(__le32_to_cpu(rxd->attention.flags) &
+                               RX_ATTENTION_FLAGS_DECRYPT_ERR);
+       fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
+                       RX_MSDU_START_INFO1_DECAP_FORMAT);
+
+       if (fmt != RX_MSDU_DECAP_RAW) {
+               ath10k_warn("we don't support non-raw fragmented rx yet\n");
+               dev_kfree_skb_any(msdu_head);
+               goto end;
+       }
+
+       info.skb = msdu_head;
+       info.status = HTT_RX_IND_MPDU_STATUS_OK;
+       info.encrypt_type = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+                               RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+
+       if (tkip_mic_err) {
+               ath10k_warn("tkip mic error\n");
+               info.status = HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR;
+       }
+
+       if (decrypt_err) {
+               ath10k_warn("decryption err in fragmented rx\n");
+               dev_kfree_skb_any(info.skb);
+               goto end;
+       }
+
+       if (info.encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
+               hdrlen = ieee80211_hdrlen(hdr->frame_control);
+               paramlen = ath10k_htt_rx_crypto_param_len(info.encrypt_type);
+
+               /* It is more efficient to move the header than the payload */
+               memmove((void *)info.skb->data + paramlen,
+                       (void *)info.skb->data,
+                       hdrlen);
+               skb_pull(info.skb, paramlen);
+               hdr = (struct ieee80211_hdr *)info.skb->data;
+       }
+
+       /* remove trailing FCS */
+       trim  = 4;
+
+       /* remove crypto trailer */
+       trim += ath10k_htt_rx_crypto_tail_len(info.encrypt_type);
+
+       /* last fragment of TKIP frags has MIC */
+       if (!ieee80211_has_morefrags(hdr->frame_control) &&
+           info.encrypt_type == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
+               trim += 8;
+
+       if (trim > info.skb->len) {
+               ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n");
+               dev_kfree_skb_any(info.skb);
+               goto end;
+       }
+
+       skb_trim(info.skb, info.skb->len - trim);
+
+       ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt frag mpdu: ",
+                       info.skb->data, info.skb->len);
+       ath10k_process_rx(htt->ar, &info);
+
+end:
+       if (fw_desc_len > 0) {
+               ath10k_dbg(ATH10K_DBG_HTT,
+                          "expecting more fragmented rx in one indication %d\n",
+                          fw_desc_len);
+       }
+}
+
+void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
+{
+       struct ath10k_htt *htt = ar->htt;
+       struct htt_resp *resp = (struct htt_resp *)skb->data;
+
+       /* confirm alignment */
+       if (!IS_ALIGNED((unsigned long)skb->data, 4))
+               ath10k_warn("unaligned htt message, expect trouble\n");
+
+       ath10k_dbg(ATH10K_DBG_HTT, "HTT RX, msg_type: 0x%0X\n",
+                  resp->hdr.msg_type);
+       switch (resp->hdr.msg_type) {
+       case HTT_T2H_MSG_TYPE_VERSION_CONF: {
+               htt->target_version_major = resp->ver_resp.major;
+               htt->target_version_minor = resp->ver_resp.minor;
+               complete(&htt->target_version_received);
+               break;
+       }
+       case HTT_T2H_MSG_TYPE_RX_IND: {
+               ath10k_htt_rx_handler(htt, &resp->rx_ind);
+               break;
+       }
+       case HTT_T2H_MSG_TYPE_PEER_MAP: {
+               struct htt_peer_map_event ev = {
+                       .vdev_id = resp->peer_map.vdev_id,
+                       .peer_id = __le16_to_cpu(resp->peer_map.peer_id),
+               };
+               memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
+               ath10k_peer_map_event(htt, &ev);
+               break;
+       }
+       case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
+               struct htt_peer_unmap_event ev = {
+                       .peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
+               };
+               ath10k_peer_unmap_event(htt, &ev);
+               break;
+       }
+       case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
+               struct htt_tx_done tx_done = {};
+               int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
+
+               tx_done.msdu_id =
+                       __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
+
+               switch (status) {
+               case HTT_MGMT_TX_STATUS_OK:
+                       break;
+               case HTT_MGMT_TX_STATUS_RETRY:
+                       tx_done.no_ack = true;
+                       break;
+               case HTT_MGMT_TX_STATUS_DROP:
+                       tx_done.discard = true;
+                       break;
+               }
+
+               ath10k_txrx_tx_completed(htt, &tx_done);
+               break;
+       }
+       case HTT_T2H_MSG_TYPE_TX_COMPL_IND: {
+               struct htt_tx_done tx_done = {};
+               int status = MS(resp->data_tx_completion.flags,
+                               HTT_DATA_TX_STATUS);
+               __le16 msdu_id;
+               int i;
+
+               switch (status) {
+               case HTT_DATA_TX_STATUS_NO_ACK:
+                       tx_done.no_ack = true;
+                       break;
+               case HTT_DATA_TX_STATUS_OK:
+                       break;
+               case HTT_DATA_TX_STATUS_DISCARD:
+               case HTT_DATA_TX_STATUS_POSTPONE:
+               case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
+                       tx_done.discard = true;
+                       break;
+               default:
+                       ath10k_warn("unhandled tx completion status %d\n",
+                                   status);
+                       tx_done.discard = true;
+                       break;
+               }
+
+               ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
+                          resp->data_tx_completion.num_msdus);
+
+               for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
+                       msdu_id = resp->data_tx_completion.msdus[i];
+                       tx_done.msdu_id = __le16_to_cpu(msdu_id);
+                       ath10k_txrx_tx_completed(htt, &tx_done);
+               }
+               break;
+       }
+       case HTT_T2H_MSG_TYPE_SEC_IND: {
+               struct ath10k *ar = htt->ar;
+               struct htt_security_indication *ev = &resp->security_indication;
+
+               ath10k_dbg(ATH10K_DBG_HTT,
+                          "sec ind peer_id %d unicast %d type %d\n",
+                         __le16_to_cpu(ev->peer_id),
+                         !!(ev->flags & HTT_SECURITY_IS_UNICAST),
+                         MS(ev->flags, HTT_SECURITY_TYPE));
+               complete(&ar->install_key_done);
+               break;
+       }
+       case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
+               ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
+                               skb->data, skb->len);
+               ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
+               break;
+       }
+       case HTT_T2H_MSG_TYPE_TEST:
+               /* FIX THIS */
+               break;
+       case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
+       case HTT_T2H_MSG_TYPE_STATS_CONF:
+       case HTT_T2H_MSG_TYPE_RX_ADDBA:
+       case HTT_T2H_MSG_TYPE_RX_DELBA:
+       case HTT_T2H_MSG_TYPE_RX_FLUSH:
+       default:
+               ath10k_dbg(ATH10K_DBG_HTT, "htt event (%d) not handled\n",
+                          resp->hdr.msg_type);
+               ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
+                               skb->data, skb->len);
+               break;
+       }
+
+       /* Free the indication buffer */
+       dev_kfree_skb_any(skb);
+}
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
new file mode 100644 (file)
index 0000000..ef79106
--- /dev/null
@@ -0,0 +1,510 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include "htt.h"
+#include "mac.h"
+#include "hif.h"
+#include "txrx.h"
+#include "debug.h"
+
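+/* Caller is expected to hold htt->tx_lock; ath10k_htt_tx_dec_pending()
+ * below provides a locked wrapper. */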
+void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
+{
+       htt->num_pending_tx--;
+       if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
+               ieee80211_wake_queues(htt->ar->hw);
+}
+
+static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
+{
+       spin_lock_bh(&htt->tx_lock);
+       __ath10k_htt_tx_dec_pending(htt);
+       spin_unlock_bh(&htt->tx_lock);
+}
+
+static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
+{
+       int ret = 0;
+
+       spin_lock_bh(&htt->tx_lock);
+
+       if (htt->num_pending_tx >= htt->max_num_pending_tx) {
+               ret = -EBUSY;
+               goto exit;
+       }
+
+       htt->num_pending_tx++;
+       if (htt->num_pending_tx == htt->max_num_pending_tx)
+               ieee80211_stop_queues(htt->ar->hw);
+
+exit:
+       spin_unlock_bh(&htt->tx_lock);
+       return ret;
+}
+
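+/* msdu ids are used to match HTT tx completions back to the pending tx
+ * descriptors; the caller must hold htt->tx_lock. */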
+int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt)
+{
+       int msdu_id;
+
+       lockdep_assert_held(&htt->tx_lock);
+
+       msdu_id = find_first_zero_bit(htt->used_msdu_ids,
+                                     htt->max_num_pending_tx);
+       if (msdu_id == htt->max_num_pending_tx)
+               return -ENOBUFS;
+
+       ath10k_dbg(ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id);
+       __set_bit(msdu_id, htt->used_msdu_ids);
+       return msdu_id;
+}
+
+void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
+{
+       lockdep_assert_held(&htt->tx_lock);
+
+       if (!test_bit(msdu_id, htt->used_msdu_ids))
+               ath10k_warn("trying to free unallocated msdu_id %d\n", msdu_id);
+
+       ath10k_dbg(ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
+       __clear_bit(msdu_id, htt->used_msdu_ids);
+}
+
+int ath10k_htt_tx_attach(struct ath10k_htt *htt)
+{
+       u8 pipe;
+
+       spin_lock_init(&htt->tx_lock);
+       init_waitqueue_head(&htt->empty_tx_wq);
+
+       /* At startup the free queue number should give us a hint about the
+        * maximum queue length */
+       pipe = htt->ar->htc->endpoint[htt->eid].ul_pipe_id;
+       htt->max_num_pending_tx = ath10k_hif_get_free_queue_number(htt->ar,
+                                                                  pipe);
+
+       ath10k_dbg(ATH10K_DBG_HTT, "htt tx max num pending tx %d\n",
+                  htt->max_num_pending_tx);
+
+       htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
+                                 htt->max_num_pending_tx, GFP_KERNEL);
+       if (!htt->pending_tx)
+               return -ENOMEM;
+
+       htt->used_msdu_ids = kzalloc(sizeof(unsigned long) *
+                                    BITS_TO_LONGS(htt->max_num_pending_tx),
+                                    GFP_KERNEL);
+       if (!htt->used_msdu_ids) {
+               kfree(htt->pending_tx);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
+{
+       struct sk_buff *txdesc;
+       int msdu_id;
+
+       /* No locks needed. Called after communication with the device has
+        * been stopped. */
+
+       for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
+               if (!test_bit(msdu_id, htt->used_msdu_ids))
+                       continue;
+
+               txdesc = htt->pending_tx[msdu_id];
+               if (!txdesc)
+                       continue;
+
+               ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
+                          msdu_id);
+
+               if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0)
+                       ATH10K_SKB_CB(txdesc)->htt.refcount = 1;
+
+               ATH10K_SKB_CB(txdesc)->htt.discard = true;
+               ath10k_txrx_tx_unref(htt, txdesc);
+       }
+}
+
+void ath10k_htt_tx_detach(struct ath10k_htt *htt)
+{
+       ath10k_htt_tx_cleanup_pending(htt);
+       kfree(htt->pending_tx);
+       kfree(htt->used_msdu_ids);
+       return;
+}
+
+void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
+{
+       struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+       struct ath10k_htt *htt = ar->htt;
+
+       if (skb_cb->htt.is_conf) {
+               dev_kfree_skb_any(skb);
+               return;
+       }
+
+       if (skb_cb->is_aborted) {
+               skb_cb->htt.discard = true;
+
+               /* If the skbuff is aborted we need to make sure the tx
+                * resources are freed. We can't simply run tx_unref() twice,
+                * because if the htt tx completion came in earlier we'd
+                * access already-freed memory. */
+               if (skb_cb->htt.refcount > 1)
+                       skb_cb->htt.refcount = 1;
+       }
+
+       ath10k_txrx_tx_unref(htt, skb);
+}
+
+int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
+{
+       struct sk_buff *skb;
+       struct htt_cmd *cmd;
+       int len = 0;
+       int ret;
+
+       len += sizeof(cmd->hdr);
+       len += sizeof(cmd->ver_req);
+
+       skb = ath10k_htc_alloc_skb(len);
+       if (!skb)
+               return -ENOMEM;
+
+       skb_put(skb, len);
+       cmd = (struct htt_cmd *)skb->data;
+       cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;
+
+       ATH10K_SKB_CB(skb)->htt.is_conf = true;
+
+       ret = ath10k_htc_send(htt->ar->htc, htt->eid, skb);
+       if (ret) {
+               dev_kfree_skb_any(skb);
+               return ret;
+       }
+
+       return 0;
+}
+
+int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
+{
+       struct sk_buff *skb;
+       struct htt_cmd *cmd;
+       struct htt_rx_ring_setup_ring *ring;
+       const int num_rx_ring = 1;
+       u16 flags;
+       u32 fw_idx;
+       int len;
+       int ret;
+
+       /*
+        * the HW expects the buffer to be an integral number of 4-byte
+        * "words"
+        */
+       BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
+       BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);
+
+       len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
+           + (sizeof(*ring) * num_rx_ring);
+       skb = ath10k_htc_alloc_skb(len);
+       if (!skb)
+               return -ENOMEM;
+
+       skb_put(skb, len);
+
+       cmd = (struct htt_cmd *)skb->data;
+       ring = &cmd->rx_setup.rings[0];
+
+       cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
+       cmd->rx_setup.hdr.num_rings = 1;
+
+       /* FIXME: do we need all of this? */
+       flags = 0;
+       flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
+       flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
+       flags |= HTT_RX_RING_FLAGS_PPDU_START;
+       flags |= HTT_RX_RING_FLAGS_PPDU_END;
+       flags |= HTT_RX_RING_FLAGS_MPDU_START;
+       flags |= HTT_RX_RING_FLAGS_MPDU_END;
+       flags |= HTT_RX_RING_FLAGS_MSDU_START;
+       flags |= HTT_RX_RING_FLAGS_MSDU_END;
+       flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
+       flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
+       flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
+       flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
+       flags |= HTT_RX_RING_FLAGS_CTRL_RX;
+       flags |= HTT_RX_RING_FLAGS_MGMT_RX;
+       flags |= HTT_RX_RING_FLAGS_NULL_RX;
+       flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;
+
+       fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
+
+       ring->fw_idx_shadow_reg_paddr =
+               __cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
+       ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
+       ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
+       ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
+       ring->flags = __cpu_to_le16(flags);
+       ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
+
+#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
+
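+       /* Offsets handed to the firmware are expressed in 4-byte words,
+        * hence the division by four in desc_offset(). */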
+       ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
+       ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
+       ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
+       ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
+       ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
+       ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
+       ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
+       ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
+       ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
+       ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
+
+#undef desc_offset
+
+       ATH10K_SKB_CB(skb)->htt.is_conf = true;
+
+       ret = ath10k_htc_send(htt->ar->htc, htt->eid, skb);
+       if (ret) {
+               dev_kfree_skb_any(skb);
+               return ret;
+       }
+
+       return 0;
+}
+
+int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
+{
+       struct device *dev = htt->ar->dev;
+       struct ath10k_skb_cb *skb_cb;
+       struct sk_buff *txdesc = NULL;
+       struct htt_cmd *cmd;
+       u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
+       int len = 0;
+       int msdu_id = -1;
+       int res;
+
+
+       res = ath10k_htt_tx_inc_pending(htt);
+       if (res)
+               return res;
+
+       len += sizeof(cmd->hdr);
+       len += sizeof(cmd->mgmt_tx);
+
+       txdesc = ath10k_htc_alloc_skb(len);
+       if (!txdesc) {
+               res = -ENOMEM;
+               goto err;
+       }
+
+       spin_lock_bh(&htt->tx_lock);
+       msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
+       if (msdu_id < 0) {
+               spin_unlock_bh(&htt->tx_lock);
+               res = msdu_id;
+               goto err;
+       }
+       htt->pending_tx[msdu_id] = txdesc;
+       spin_unlock_bh(&htt->tx_lock);
+
+       res = ath10k_skb_map(dev, msdu);
+       if (res)
+               goto err;
+
+       skb_put(txdesc, len);
+       cmd = (struct htt_cmd *)txdesc->data;
+       cmd->hdr.msg_type         = HTT_H2T_MSG_TYPE_MGMT_TX;
+       cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
+       cmd->mgmt_tx.len        = __cpu_to_le32(msdu->len);
+       cmd->mgmt_tx.desc_id    = __cpu_to_le32(msdu_id);
+       cmd->mgmt_tx.vdev_id    = __cpu_to_le32(vdev_id);
+       memcpy(cmd->mgmt_tx.hdr, msdu->data,
+              min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));
+
+       /* The refcount is decremented by the HTC and HTT completions; once it
+        * reaches zero the descriptor is freed. */
+       skb_cb = ATH10K_SKB_CB(txdesc);
+       skb_cb->htt.msdu_id = msdu_id;
+       skb_cb->htt.refcount = 2;
+       skb_cb->htt.msdu = msdu;
+
+       res = ath10k_htc_send(htt->ar->htc, htt->eid, txdesc);
+       if (res)
+               goto err;
+
+       return 0;
+
+err:
+       ath10k_skb_unmap(dev, msdu);
+
+       if (txdesc)
+               dev_kfree_skb_any(txdesc);
+       if (msdu_id >= 0) {
+               spin_lock_bh(&htt->tx_lock);
+               htt->pending_tx[msdu_id] = NULL;
+               ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+               spin_unlock_bh(&htt->tx_lock);
+       }
+       ath10k_htt_tx_dec_pending(htt);
+       return res;
+}
+
+int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
+{
+       struct device *dev = htt->ar->dev;
+       struct htt_cmd *cmd;
+       struct htt_data_tx_desc_frag *tx_frags;
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
+       struct ath10k_skb_cb *skb_cb;
+       struct sk_buff *txdesc = NULL;
+       struct sk_buff *txfrag = NULL;
+       u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
+       u8 tid;
+       int prefetch_len, desc_len, frag_len;
+       dma_addr_t frags_paddr;
+       int msdu_id = -1;
+       int res;
+       u8 flags0;
+       u16 flags1;
+
+       res = ath10k_htt_tx_inc_pending(htt);
+       if (res)
+               return res;
+
+       prefetch_len = min(htt->prefetch_len, msdu->len);
+       prefetch_len = roundup(prefetch_len, 4);
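+       /* The first prefetch_len bytes of the frame are also copied into the
+        * HTT command itself (cmd->data_tx.prefetch), presumably so the
+        * target can inspect the headers without waiting for the fragment
+        * DMA. */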
+
+       desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len;
+       frag_len = sizeof(*tx_frags) * 2;
+
+       txdesc = ath10k_htc_alloc_skb(desc_len);
+       if (!txdesc) {
+               res = -ENOMEM;
+               goto err;
+       }
+
+       txfrag = dev_alloc_skb(frag_len);
+       if (!txfrag) {
+               res = -ENOMEM;
+               goto err;
+       }
+
+       if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) {
+               ath10k_warn("htt alignment check failed. dropping packet.\n");
+               res = -EIO;
+               goto err;
+       }
+
+       spin_lock_bh(&htt->tx_lock);
+       msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
+       if (msdu_id < 0) {
+               spin_unlock_bh(&htt->tx_lock);
+               res = msdu_id;
+               goto err;
+       }
+       htt->pending_tx[msdu_id] = txdesc;
+       spin_unlock_bh(&htt->tx_lock);
+
+       res = ath10k_skb_map(dev, msdu);
+       if (res)
+               goto err;
+
+       /* the tx fragment list must be terminated with a zero entry */
+       skb_put(txfrag, frag_len);
+       tx_frags = (struct htt_data_tx_desc_frag *)txfrag->data;
+       tx_frags[0].paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
+       tx_frags[0].len   = __cpu_to_le32(msdu->len);
+       tx_frags[1].paddr = __cpu_to_le32(0);
+       tx_frags[1].len   = __cpu_to_le32(0);
+
+       res = ath10k_skb_map(dev, txfrag);
+       if (res)
+               goto err;
+
+       ath10k_dbg(ATH10K_DBG_HTT, "txfrag 0x%llx msdu 0x%llx\n",
+                  (unsigned long long) ATH10K_SKB_CB(txfrag)->paddr,
+                  (unsigned long long) ATH10K_SKB_CB(msdu)->paddr);
+       ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "txfrag: ",
+                       txfrag->data, frag_len);
+       ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ",
+                       msdu->data, msdu->len);
+
+       skb_put(txdesc, desc_len);
+       cmd = (struct htt_cmd *)txdesc->data;
+       memset(cmd, 0, desc_len);
+
+       tid = ATH10K_SKB_CB(msdu)->htt.tid;
+
+       ath10k_dbg(ATH10K_DBG_HTT, "htt data tx using tid %hhu\n", tid);
+
+       flags0  = 0;
+       if (!ieee80211_has_protected(hdr->frame_control))
+               flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
+       flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+       flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
+                    HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+
+       flags1  = 0;
+       flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
+       flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
+
+       frags_paddr = ATH10K_SKB_CB(txfrag)->paddr;
+
+       cmd->hdr.msg_type        = HTT_H2T_MSG_TYPE_TX_FRM;
+       cmd->data_tx.flags0      = flags0;
+       cmd->data_tx.flags1      = __cpu_to_le16(flags1);
+       cmd->data_tx.len         = __cpu_to_le16(msdu->len);
+       cmd->data_tx.id          = __cpu_to_le16(msdu_id);
+       cmd->data_tx.frags_paddr = __cpu_to_le32(frags_paddr);
+       cmd->data_tx.peerid      = __cpu_to_le32(HTT_INVALID_PEERID);
+
+       memcpy(cmd->data_tx.prefetch, msdu->data, prefetch_len);
+
+       /* The refcount is decremented by the HTC and HTT completions; once it
+        * reaches zero the descriptor is freed. */
+       skb_cb = ATH10K_SKB_CB(txdesc);
+       skb_cb->htt.msdu_id = msdu_id;
+       skb_cb->htt.refcount = 2;
+       skb_cb->htt.txfrag = txfrag;
+       skb_cb->htt.msdu = msdu;
+
+       res = ath10k_htc_send(htt->ar->htc, htt->eid, txdesc);
+       if (res)
+               goto err;
+
+       return 0;
+err:
+       if (txfrag)
+               ath10k_skb_unmap(dev, txfrag);
+       if (txdesc)
+               dev_kfree_skb_any(txdesc);
+       if (txfrag)
+               dev_kfree_skb_any(txfrag);
+       if (msdu_id >= 0) {
+               spin_lock_bh(&htt->tx_lock);
+               htt->pending_tx[msdu_id] = NULL;
+               ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+               spin_unlock_bh(&htt->tx_lock);
+       }
+       ath10k_htt_tx_dec_pending(htt);
+       ath10k_skb_unmap(dev, msdu);
+       return res;
+}
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
new file mode 100644 (file)
index 0000000..44ed5af
--- /dev/null
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _HW_H_
+#define _HW_H_
+
+#include "targaddrs.h"
+
+/* Supported FW version */
+#define SUPPORTED_FW_MAJOR     1
+#define SUPPORTED_FW_MINOR     0
+#define SUPPORTED_FW_RELEASE   0
+#define SUPPORTED_FW_BUILD     629
+
+/* QCA988X 1.0 definitions */
+#define QCA988X_HW_1_0_VERSION         0x4000002c
+#define QCA988X_HW_1_0_FW_DIR          "ath10k/QCA988X/hw1.0"
+#define QCA988X_HW_1_0_FW_FILE         "firmware.bin"
+#define QCA988X_HW_1_0_OTP_FILE                "otp.bin"
+#define QCA988X_HW_1_0_BOARD_DATA_FILE "board.bin"
+#define QCA988X_HW_1_0_PATCH_LOAD_ADDR 0x1234
+
+/* QCA988X 2.0 definitions */
+#define QCA988X_HW_2_0_VERSION         0x4100016c
+#define QCA988X_HW_2_0_FW_DIR          "ath10k/QCA988X/hw2.0"
+#define QCA988X_HW_2_0_FW_FILE         "firmware.bin"
+#define QCA988X_HW_2_0_OTP_FILE                "otp.bin"
+#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
+#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
+
+/* Known peculiarities:
+ *  - current FW doesn't support raw rx mode (last tested v599)
+ *  - current FW dumps upon raw tx mode (last tested v599)
+ *  - raw appears in nwifi decap, raw and nwifi appear in ethernet decap
+ *  - raw has FCS, nwifi doesn't
+ *  - ethernet frames have the 802.11 header decapped and each part (base hdr,
+ *    cipher param, llc/snap) is aligned to a 4-byte boundary */
+enum ath10k_hw_txrx_mode {
+       ATH10K_HW_TXRX_RAW = 0,
+       ATH10K_HW_TXRX_NATIVE_WIFI = 1,
+       ATH10K_HW_TXRX_ETHERNET = 2,
+};
+
+enum ath10k_mcast2ucast_mode {
+       ATH10K_MCAST2UCAST_DISABLED = 0,
+       ATH10K_MCAST2UCAST_ENABLED = 1,
+};
+
+#define TARGET_NUM_VDEVS                       8
+#define TARGET_NUM_PEER_AST                    2
+#define TARGET_NUM_WDS_ENTRIES                 32
+#define TARGET_DMA_BURST_SIZE                  0
+#define TARGET_MAC_AGGR_DELIM                  0
+#define TARGET_AST_SKID_LIMIT                  16
+#define TARGET_NUM_PEERS                       16
+#define TARGET_NUM_OFFLOAD_PEERS               0
+#define TARGET_NUM_OFFLOAD_REORDER_BUFS         0
+#define TARGET_NUM_PEER_KEYS                   2
+#define TARGET_NUM_TIDS                (2 * ((TARGET_NUM_PEERS) + (TARGET_NUM_VDEVS)))
+#define TARGET_TX_CHAIN_MASK                   (BIT(0) | BIT(1) | BIT(2))
+#define TARGET_RX_CHAIN_MASK                   (BIT(0) | BIT(1) | BIT(2))
+#define TARGET_RX_TIMEOUT_LO_PRI               100
+#define TARGET_RX_TIMEOUT_HI_PRI               40
+#define TARGET_RX_DECAP_MODE                   ATH10K_HW_TXRX_ETHERNET
+#define TARGET_SCAN_MAX_PENDING_REQS           4
+#define TARGET_BMISS_OFFLOAD_MAX_VDEV          3
+#define TARGET_ROAM_OFFLOAD_MAX_VDEV           3
+#define TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES    8
+#define TARGET_GTK_OFFLOAD_MAX_VDEV            3
+#define TARGET_NUM_MCAST_GROUPS                        0
+#define TARGET_NUM_MCAST_TABLE_ELEMS           0
+#define TARGET_MCAST2UCAST_MODE                        ATH10K_MCAST2UCAST_DISABLED
+#define TARGET_TX_DBG_LOG_SIZE                 1024
+#define TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 0
+#define TARGET_VOW_CONFIG                      0
+#define TARGET_NUM_MSDU_DESC                   (1024 + 400)
+#define TARGET_MAX_FRAG_ENTRIES                        0
+
+
+/* Number of Copy Engines supported */
+#define CE_COUNT 8
+
+/*
+ * Total number of PCIe MSI interrupts requested for all interrupt sources.
+ * The PCIe standard forces this to be a power of 2.
+ * Some host OSes limit the number of MSI requests that can be granted to 8,
+ * so for now we abide by this limit and avoid requesting more than that.
+ */
+#define MSI_NUM_REQUEST_LOG2   3
+#define MSI_NUM_REQUEST                (1<<MSI_NUM_REQUEST_LOG2)
+
+/*
+ * Granted MSIs are assigned as follows:
+ * firmware uses the first; the remaining MSIs, if any, are used by the
+ * Copy Engines.
+ * This mapping is known to both Target firmware and Host software.
+ * It may be changed as long as Host and Target are kept in sync.
+ */
+/* MSI for firmware (errors, etc.) */
+#define MSI_ASSIGN_FW          0
+
+/* MSIs for Copy Engines */
+#define MSI_ASSIGN_CE_INITIAL  1
+#define MSI_ASSIGN_CE_MAX      7
+
+/* as of IP3.7.1 */
+#define RTC_STATE_V_ON                         3
+
+#define RTC_STATE_COLD_RESET_MASK              0x00000400
+#define RTC_STATE_V_LSB                                0
+#define RTC_STATE_V_MASK                       0x00000007
+#define RTC_STATE_ADDRESS                      0x0000
+#define PCIE_SOC_WAKE_V_MASK                   0x00000001
+#define PCIE_SOC_WAKE_ADDRESS                  0x0004
+#define PCIE_SOC_WAKE_RESET                    0x00000000
+#define SOC_GLOBAL_RESET_ADDRESS               0x0008
+
+#define RTC_SOC_BASE_ADDRESS                   0x00004000
+#define RTC_WMAC_BASE_ADDRESS                  0x00005000
+#define MAC_COEX_BASE_ADDRESS                  0x00006000
+#define BT_COEX_BASE_ADDRESS                   0x00007000
+#define SOC_PCIE_BASE_ADDRESS                  0x00008000
+#define SOC_CORE_BASE_ADDRESS                  0x00009000
+#define WLAN_UART_BASE_ADDRESS                 0x0000c000
+#define WLAN_SI_BASE_ADDRESS                   0x00010000
+#define WLAN_GPIO_BASE_ADDRESS                 0x00014000
+#define WLAN_ANALOG_INTF_BASE_ADDRESS          0x0001c000
+#define WLAN_MAC_BASE_ADDRESS                  0x00020000
+#define EFUSE_BASE_ADDRESS                     0x00030000
+#define FPGA_REG_BASE_ADDRESS                  0x00039000
+#define WLAN_UART2_BASE_ADDRESS                        0x00054c00
+#define CE_WRAPPER_BASE_ADDRESS                        0x00057000
+#define CE0_BASE_ADDRESS                       0x00057400
+#define CE1_BASE_ADDRESS                       0x00057800
+#define CE2_BASE_ADDRESS                       0x00057c00
+#define CE3_BASE_ADDRESS                       0x00058000
+#define CE4_BASE_ADDRESS                       0x00058400
+#define CE5_BASE_ADDRESS                       0x00058800
+#define CE6_BASE_ADDRESS                       0x00058c00
+#define CE7_BASE_ADDRESS                       0x00059000
+#define DBI_BASE_ADDRESS                       0x00060000
+#define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS     0x0006c000
+#define PCIE_LOCAL_BASE_ADDRESS                        0x00080000
+
+#define SOC_RESET_CONTROL_OFFSET               0x00000000
+#define SOC_RESET_CONTROL_SI0_RST_MASK         0x00000001
+#define SOC_CPU_CLOCK_OFFSET                   0x00000020
+#define SOC_CPU_CLOCK_STANDARD_LSB             0
+#define SOC_CPU_CLOCK_STANDARD_MASK            0x00000003
+#define SOC_CLOCK_CONTROL_OFFSET               0x00000028
+#define SOC_CLOCK_CONTROL_SI0_CLK_MASK         0x00000001
+#define SOC_SYSTEM_SLEEP_OFFSET                        0x000000c4
+#define SOC_LPO_CAL_OFFSET                     0x000000e0
+#define SOC_LPO_CAL_ENABLE_LSB                 20
+#define SOC_LPO_CAL_ENABLE_MASK                        0x00100000
+
+#define WLAN_RESET_CONTROL_COLD_RST_MASK       0x00000008
+#define WLAN_RESET_CONTROL_WARM_RST_MASK       0x00000004
+#define WLAN_SYSTEM_SLEEP_DISABLE_LSB          0
+#define WLAN_SYSTEM_SLEEP_DISABLE_MASK         0x00000001
+
+#define WLAN_GPIO_PIN0_ADDRESS                 0x00000028
+#define WLAN_GPIO_PIN0_CONFIG_MASK             0x00007800
+#define WLAN_GPIO_PIN1_ADDRESS                 0x0000002c
+#define WLAN_GPIO_PIN1_CONFIG_MASK             0x00007800
+#define WLAN_GPIO_PIN10_ADDRESS                        0x00000050
+#define WLAN_GPIO_PIN11_ADDRESS                        0x00000054
+#define WLAN_GPIO_PIN12_ADDRESS                        0x00000058
+#define WLAN_GPIO_PIN13_ADDRESS                        0x0000005c
+
+#define CLOCK_GPIO_OFFSET                      0xffffffff
+#define CLOCK_GPIO_BT_CLK_OUT_EN_LSB           0
+#define CLOCK_GPIO_BT_CLK_OUT_EN_MASK          0
+
+#define SI_CONFIG_OFFSET                       0x00000000
+#define SI_CONFIG_BIDIR_OD_DATA_LSB            18
+#define SI_CONFIG_BIDIR_OD_DATA_MASK           0x00040000
+#define SI_CONFIG_I2C_LSB                      16
+#define SI_CONFIG_I2C_MASK                     0x00010000
+#define SI_CONFIG_POS_SAMPLE_LSB               7
+#define SI_CONFIG_POS_SAMPLE_MASK              0x00000080
+#define SI_CONFIG_INACTIVE_DATA_LSB            5
+#define SI_CONFIG_INACTIVE_DATA_MASK           0x00000020
+#define SI_CONFIG_INACTIVE_CLK_LSB             4
+#define SI_CONFIG_INACTIVE_CLK_MASK            0x00000010
+#define SI_CONFIG_DIVIDER_LSB                  0
+#define SI_CONFIG_DIVIDER_MASK                 0x0000000f
+#define SI_CS_OFFSET                           0x00000004
+#define SI_CS_DONE_ERR_MASK                    0x00000400
+#define SI_CS_DONE_INT_MASK                    0x00000200
+#define SI_CS_START_LSB                                8
+#define SI_CS_START_MASK                       0x00000100
+#define SI_CS_RX_CNT_LSB                       4
+#define SI_CS_RX_CNT_MASK                      0x000000f0
+#define SI_CS_TX_CNT_LSB                       0
+#define SI_CS_TX_CNT_MASK                      0x0000000f
+
+#define SI_TX_DATA0_OFFSET                     0x00000008
+#define SI_TX_DATA1_OFFSET                     0x0000000c
+#define SI_RX_DATA0_OFFSET                     0x00000010
+#define SI_RX_DATA1_OFFSET                     0x00000014
+
+#define CORE_CTRL_CPU_INTR_MASK                        0x00002000
+#define CORE_CTRL_ADDRESS                      0x0000
+#define PCIE_INTR_ENABLE_ADDRESS               0x0008
+#define PCIE_INTR_CLR_ADDRESS                  0x0014
+#define SCRATCH_3_ADDRESS                      0x0030
+
+/* Firmware indications to the Host via SCRATCH_3 register. */
+#define FW_INDICATOR_ADDRESS   (SOC_CORE_BASE_ADDRESS + SCRATCH_3_ADDRESS)
+#define FW_IND_EVENT_PENDING                   1
+#define FW_IND_INITIALIZED                     2
+
+/* HOST_REG interrupt from firmware */
+#define PCIE_INTR_FIRMWARE_MASK                        0x00000400
+#define PCIE_INTR_CE_MASK_ALL                  0x0007f800
+
+#define DRAM_BASE_ADDRESS                      0x00400000
+
+#define MISSING 0
+
+#define SYSTEM_SLEEP_OFFSET                    SOC_SYSTEM_SLEEP_OFFSET
+#define WLAN_SYSTEM_SLEEP_OFFSET               SOC_SYSTEM_SLEEP_OFFSET
+#define WLAN_RESET_CONTROL_OFFSET              SOC_RESET_CONTROL_OFFSET
+#define CLOCK_CONTROL_OFFSET                   SOC_CLOCK_CONTROL_OFFSET
+#define CLOCK_CONTROL_SI0_CLK_MASK             SOC_CLOCK_CONTROL_SI0_CLK_MASK
+#define RESET_CONTROL_MBOX_RST_MASK            MISSING
+#define RESET_CONTROL_SI0_RST_MASK             SOC_RESET_CONTROL_SI0_RST_MASK
+#define GPIO_BASE_ADDRESS                      WLAN_GPIO_BASE_ADDRESS
+#define GPIO_PIN0_OFFSET                       WLAN_GPIO_PIN0_ADDRESS
+#define GPIO_PIN1_OFFSET                       WLAN_GPIO_PIN1_ADDRESS
+#define GPIO_PIN0_CONFIG_MASK                  WLAN_GPIO_PIN0_CONFIG_MASK
+#define GPIO_PIN1_CONFIG_MASK                  WLAN_GPIO_PIN1_CONFIG_MASK
+#define SI_BASE_ADDRESS                                WLAN_SI_BASE_ADDRESS
+#define SCRATCH_BASE_ADDRESS                   SOC_CORE_BASE_ADDRESS
+#define LOCAL_SCRATCH_OFFSET                   0x18
+#define CPU_CLOCK_OFFSET                       SOC_CPU_CLOCK_OFFSET
+#define LPO_CAL_OFFSET                         SOC_LPO_CAL_OFFSET
+#define GPIO_PIN10_OFFSET                      WLAN_GPIO_PIN10_ADDRESS
+#define GPIO_PIN11_OFFSET                      WLAN_GPIO_PIN11_ADDRESS
+#define GPIO_PIN12_OFFSET                      WLAN_GPIO_PIN12_ADDRESS
+#define GPIO_PIN13_OFFSET                      WLAN_GPIO_PIN13_ADDRESS
+#define CPU_CLOCK_STANDARD_LSB                 SOC_CPU_CLOCK_STANDARD_LSB
+#define CPU_CLOCK_STANDARD_MASK                        SOC_CPU_CLOCK_STANDARD_MASK
+#define LPO_CAL_ENABLE_LSB                     SOC_LPO_CAL_ENABLE_LSB
+#define LPO_CAL_ENABLE_MASK                    SOC_LPO_CAL_ENABLE_MASK
+#define ANALOG_INTF_BASE_ADDRESS               WLAN_ANALOG_INTF_BASE_ADDRESS
+#define MBOX_BASE_ADDRESS                      MISSING
+#define INT_STATUS_ENABLE_ERROR_LSB            MISSING
+#define INT_STATUS_ENABLE_ERROR_MASK           MISSING
+#define INT_STATUS_ENABLE_CPU_LSB              MISSING
+#define INT_STATUS_ENABLE_CPU_MASK             MISSING
+#define INT_STATUS_ENABLE_COUNTER_LSB          MISSING
+#define INT_STATUS_ENABLE_COUNTER_MASK         MISSING
+#define INT_STATUS_ENABLE_MBOX_DATA_LSB                MISSING
+#define INT_STATUS_ENABLE_MBOX_DATA_MASK       MISSING
+#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB   MISSING
+#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK  MISSING
+#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB    MISSING
+#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK   MISSING
+#define COUNTER_INT_STATUS_ENABLE_BIT_LSB      MISSING
+#define COUNTER_INT_STATUS_ENABLE_BIT_MASK     MISSING
+#define INT_STATUS_ENABLE_ADDRESS              MISSING
+#define CPU_INT_STATUS_ENABLE_BIT_LSB          MISSING
+#define CPU_INT_STATUS_ENABLE_BIT_MASK         MISSING
+#define HOST_INT_STATUS_ADDRESS                        MISSING
+#define CPU_INT_STATUS_ADDRESS                 MISSING
+#define ERROR_INT_STATUS_ADDRESS               MISSING
+#define ERROR_INT_STATUS_WAKEUP_MASK           MISSING
+#define ERROR_INT_STATUS_WAKEUP_LSB            MISSING
+#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK     MISSING
+#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB      MISSING
+#define ERROR_INT_STATUS_TX_OVERFLOW_MASK      MISSING
+#define ERROR_INT_STATUS_TX_OVERFLOW_LSB       MISSING
+#define COUNT_DEC_ADDRESS                      MISSING
+#define HOST_INT_STATUS_CPU_MASK               MISSING
+#define HOST_INT_STATUS_CPU_LSB                        MISSING
+#define HOST_INT_STATUS_ERROR_MASK             MISSING
+#define HOST_INT_STATUS_ERROR_LSB              MISSING
+#define HOST_INT_STATUS_COUNTER_MASK           MISSING
+#define HOST_INT_STATUS_COUNTER_LSB            MISSING
+#define RX_LOOKAHEAD_VALID_ADDRESS             MISSING
+#define WINDOW_DATA_ADDRESS                    MISSING
+#define WINDOW_READ_ADDR_ADDRESS               MISSING
+#define WINDOW_WRITE_ADDR_ADDRESS              MISSING
+
+#define RTC_STATE_V_GET(x) (((x) & RTC_STATE_V_MASK) >> RTC_STATE_V_LSB)
+
+#endif /* _HW_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
new file mode 100644 (file)
index 0000000..3446c98
--- /dev/null
@@ -0,0 +1,3066 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mac.h"
+
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+
+#include "core.h"
+#include "debug.h"
+#include "wmi.h"
+#include "htt.h"
+#include "txrx.h"
+
+/**********/
+/* Crypto */
+/**********/
+
+static int ath10k_send_key(struct ath10k_vif *arvif,
+                          struct ieee80211_key_conf *key,
+                          enum set_key_cmd cmd,
+                          const u8 *macaddr)
+{
+       struct wmi_vdev_install_key_arg arg = {
+               .vdev_id = arvif->vdev_id,
+               .key_idx = key->keyidx,
+               .key_len = key->keylen,
+               .key_data = key->key,
+               .macaddr = macaddr,
+       };
+
+       if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+               arg.key_flags = WMI_KEY_PAIRWISE;
+       else
+               arg.key_flags = WMI_KEY_GROUP;
+
+       switch (key->cipher) {
+       case WLAN_CIPHER_SUITE_CCMP:
+               arg.key_cipher = WMI_CIPHER_AES_CCM;
+               key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+               break;
+       case WLAN_CIPHER_SUITE_TKIP:
+               key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+               arg.key_cipher = WMI_CIPHER_TKIP;
+               arg.key_txmic_len = 8;
+               arg.key_rxmic_len = 8;
+               break;
+       case WLAN_CIPHER_SUITE_WEP40:
+       case WLAN_CIPHER_SUITE_WEP104:
+               arg.key_cipher = WMI_CIPHER_WEP;
+               /* AP/IBSS mode requires the self-key to be groupwise;
+                * otherwise the pairwise key must be set */
+               if (memcmp(macaddr, arvif->vif->addr, ETH_ALEN))
+                       arg.key_flags = WMI_KEY_PAIRWISE;
+               break;
+       default:
+               ath10k_warn("cipher %d is not supported\n", key->cipher);
+               return -EOPNOTSUPP;
+       }
+
+       if (cmd == DISABLE_KEY) {
+               arg.key_cipher = WMI_CIPHER_NONE;
+               arg.key_data = NULL;
+       }
+
+       return ath10k_wmi_vdev_install_key(arvif->ar, &arg);
+}
+
+static int ath10k_install_key(struct ath10k_vif *arvif,
+                             struct ieee80211_key_conf *key,
+                             enum set_key_cmd cmd,
+                             const u8 *macaddr)
+{
+       struct ath10k *ar = arvif->ar;
+       int ret;
+
+       INIT_COMPLETION(ar->install_key_done);
+
+       ret = ath10k_send_key(arvif, key, cmd, macaddr);
+       if (ret)
+               return ret;
+
+       ret = wait_for_completion_timeout(&ar->install_key_done, 3*HZ);
+       if (ret == 0)
+               return -ETIMEDOUT;
+
+       return 0;
+}
+
+static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
+                                       const u8 *addr)
+{
+       struct ath10k *ar = arvif->ar;
+       struct ath10k_peer *peer;
+       int ret;
+       int i;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       spin_lock_bh(&ar->data_lock);
+       peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
+       spin_unlock_bh(&ar->data_lock);
+
+       if (!peer)
+               return -ENOENT;
+
+       for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) {
+               if (arvif->wep_keys[i] == NULL)
+                       continue;
+
+               ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
+                                        addr);
+               if (ret)
+                       return ret;
+
+               peer->keys[i] = arvif->wep_keys[i];
+       }
+
+       return 0;
+}
+
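+/* Remove all keys installed for a peer. The first error is remembered and
+ * returned, but the loop continues so that every slot is cleared.
+ */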
+static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
+                                 const u8 *addr)
+{
+       struct ath10k *ar = arvif->ar;
+       struct ath10k_peer *peer;
+       int first_errno = 0;
+       int ret;
+       int i;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       spin_lock_bh(&ar->data_lock);
+       peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
+       spin_unlock_bh(&ar->data_lock);
+
+       if (!peer)
+               return -ENOENT;
+
+       for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
+               if (peer->keys[i] == NULL)
+                       continue;
+
+               ret = ath10k_install_key(arvif, peer->keys[i],
+                                        DISABLE_KEY, addr);
+               if (ret && first_errno == 0)
+                       first_errno = ret;
+
+               if (ret)
+                       ath10k_warn("could not remove peer wep key %d (%d)\n",
+                                   i, ret);
+
+               peer->keys[i] = NULL;
+       }
+
+       return first_errno;
+}
+
+static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
+                                struct ieee80211_key_conf *key)
+{
+       struct ath10k *ar = arvif->ar;
+       struct ath10k_peer *peer;
+       u8 addr[ETH_ALEN];
+       int first_errno = 0;
+       int ret;
+       int i;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       for (;;) {
+               /* ath10k_install_key() sleeps, so we can't hold data_lock
+                * the whole time; remove the keys incrementally instead */
+               spin_lock_bh(&ar->data_lock);
+               i = 0;
+               list_for_each_entry(peer, &ar->peers, list) {
+                       for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
+                               if (peer->keys[i] == key) {
+                                       memcpy(addr, peer->addr, ETH_ALEN);
+                                       peer->keys[i] = NULL;
+                                       break;
+                               }
+                       }
+
+                       if (i < ARRAY_SIZE(peer->keys))
+                               break;
+               }
+               spin_unlock_bh(&ar->data_lock);
+
+               if (i == ARRAY_SIZE(peer->keys))
+                       break;
+
+               ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr);
+               if (ret && first_errno == 0)
+                       first_errno = ret;
+
+               if (ret)
+                       ath10k_warn("could not remove key for %pM\n", addr);
+       }
+
+       return first_errno;
+}
+
+/*********************/
+/* General utilities */
+/*********************/
+
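+/* Map a cfg80211 channel definition onto the corresponding WMI PHY mode.
+ * Widths that have no matching mode resolve to MODE_UNKNOWN and trigger
+ * a warning.
+ */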
+static inline enum wmi_phy_mode
+chan_to_phymode(const struct cfg80211_chan_def *chandef)
+{
+       enum wmi_phy_mode phymode = MODE_UNKNOWN;
+
+       switch (chandef->chan->band) {
+       case IEEE80211_BAND_2GHZ:
+               switch (chandef->width) {
+               case NL80211_CHAN_WIDTH_20_NOHT:
+                       phymode = MODE_11G;
+                       break;
+               case NL80211_CHAN_WIDTH_20:
+                       phymode = MODE_11NG_HT20;
+                       break;
+               case NL80211_CHAN_WIDTH_40:
+                       phymode = MODE_11NG_HT40;
+                       break;
+               case NL80211_CHAN_WIDTH_80:
+               case NL80211_CHAN_WIDTH_80P80:
+               case NL80211_CHAN_WIDTH_160:
+                       phymode = MODE_UNKNOWN;
+                       break;
+               }
+               break;
+       case IEEE80211_BAND_5GHZ:
+               switch (chandef->width) {
+               case NL80211_CHAN_WIDTH_20_NOHT:
+                       phymode = MODE_11A;
+                       break;
+               case NL80211_CHAN_WIDTH_20:
+                       phymode = MODE_11NA_HT20;
+                       break;
+               case NL80211_CHAN_WIDTH_40:
+                       phymode = MODE_11NA_HT40;
+                       break;
+               case NL80211_CHAN_WIDTH_80:
+                       phymode = MODE_11AC_VHT80;
+                       break;
+               case NL80211_CHAN_WIDTH_80P80:
+               case NL80211_CHAN_WIDTH_160:
+                       phymode = MODE_UNKNOWN;
+                       break;
+               }
+               break;
+       default:
+               break;
+       }
+
+       WARN_ON(phymode == MODE_UNKNOWN);
+       return phymode;
+}
+
+static u8 ath10k_parse_mpdudensity(u8 mpdudensity)
+{
+/*
+ * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
+ *   0 for no restriction
+ *   1 for 1/4 us
+ *   2 for 1/2 us
+ *   3 for 1 us
+ *   4 for 2 us
+ *   5 for 4 us
+ *   6 for 8 us
+ *   7 for 16 us
+ */
+       switch (mpdudensity) {
+       case 0:
+               return 0;
+       case 1:
+       case 2:
+       case 3:
+       /* Our lower layer calculations limit our precision to
+        * 1 microsecond */
+               return 1;
+       case 4:
+               return 2;
+       case 5:
+               return 4;
+       case 6:
+               return 8;
+       case 7:
+               return 16;
+       default:
+               return 0;
+       }
+}
+
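+/* Ask the firmware to create a peer entry and wait until the peer shows up
+ * on the host side before returning.
+ */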
+static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
+{
+       int ret;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       ret = ath10k_wmi_peer_create(ar, vdev_id, addr);
+       if (ret)
+               return ret;
+
+       ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
+{
+       int ret;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       ret = ath10k_wmi_peer_delete(ar, vdev_id, addr);
+       if (ret)
+               return ret;
+
+       ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
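+/* Free host-side state for any peers still attached to the given vdev,
+ * warning about each stale entry that is removed.
+ */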
+static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
+{
+       struct ath10k_peer *peer, *tmp;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       spin_lock_bh(&ar->data_lock);
+       list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
+               if (peer->vdev_id != vdev_id)
+                       continue;
+
+               ath10k_warn("removing stale peer %pM from vdev_id %d\n",
+                           peer->addr, vdev_id);
+
+               list_del(&peer->list);
+               kfree(peer);
+       }
+       spin_unlock_bh(&ar->data_lock);
+}
+
+/************************/
+/* Interface management */
+/************************/
+
+static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
+{
+       int ret;
+
+       ret = wait_for_completion_timeout(&ar->vdev_setup_done,
+                                         ATH10K_VDEV_SETUP_TIMEOUT_HZ);
+       if (ret == 0)
+               return -ETIMEDOUT;
+
+       return 0;
+}
+
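+/* Build a WMI vdev start request from the current mac80211 channel
+ * configuration (plus SSID data for AP/IBSS vdevs) and wait for the
+ * firmware to finish the setup.
+ */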
+static int ath10k_vdev_start(struct ath10k_vif *arvif)
+{
+       struct ath10k *ar = arvif->ar;
+       struct ieee80211_conf *conf = &ar->hw->conf;
+       struct ieee80211_channel *channel = conf->chandef.chan;
+       struct wmi_vdev_start_request_arg arg = {};
+       int ret = 0;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       INIT_COMPLETION(ar->vdev_setup_done);
+
+       arg.vdev_id = arvif->vdev_id;
+       arg.dtim_period = arvif->dtim_period;
+       arg.bcn_intval = arvif->beacon_interval;
+
+       arg.channel.freq = channel->center_freq;
+
+       arg.channel.band_center_freq1 = conf->chandef.center_freq1;
+
+       arg.channel.mode = chan_to_phymode(&conf->chandef);
+
+       arg.channel.min_power = channel->max_power * 3;
+       arg.channel.max_power = channel->max_power * 4;
+       arg.channel.max_reg_power = channel->max_reg_power * 4;
+       arg.channel.max_antenna_gain = channel->max_antenna_gain;
+
+       if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+               arg.ssid = arvif->u.ap.ssid;
+               arg.ssid_len = arvif->u.ap.ssid_len;
+               arg.hidden_ssid = arvif->u.ap.hidden_ssid;
+       } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
+               arg.ssid = arvif->vif->bss_conf.ssid;
+               arg.ssid_len = arvif->vif->bss_conf.ssid_len;
+       }
+
+       ret = ath10k_wmi_vdev_start(ar, &arg);
+       if (ret) {
+               ath10k_warn("WMI vdev start failed: ret %d\n", ret);
+               return ret;
+       }
+
+       ret = ath10k_vdev_setup_sync(ar);
+       if (ret) {
+               ath10k_warn("vdev setup failed %d\n", ret);
+               return ret;
+       }
+
+       return ret;
+}
+
+static int ath10k_vdev_stop(struct ath10k_vif *arvif)
+{
+       struct ath10k *ar = arvif->ar;
+       int ret;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       INIT_COMPLETION(ar->vdev_setup_done);
+
+       ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
+       if (ret) {
+               ath10k_warn("WMI vdev stop failed: ret %d\n", ret);
+               return ret;
+       }
+
+       ret = ath10k_vdev_setup_sync(ar);
+       if (ret) {
+               ath10k_warn("vdev setup failed %d\n", ret);
+               return ret;
+       }
+
+       return ret;
+}
+
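+/* Start the monitor vdev on the current channel and bring it up. If the
+ * vdev cannot be brought up it is stopped again.
+ */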
+static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
+{
+       struct ieee80211_channel *channel = ar->hw->conf.chandef.chan;
+       struct wmi_vdev_start_request_arg arg = {};
+       enum nl80211_channel_type type;
+       int ret = 0;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       type = cfg80211_get_chandef_type(&ar->hw->conf.chandef);
+
+       arg.vdev_id = vdev_id;
+       arg.channel.freq = channel->center_freq;
+       arg.channel.band_center_freq1 = ar->hw->conf.chandef.center_freq1;
+
+       /* TODO: set this up dynamically; what should happen when
+        * there are no vifs? */
+       arg.channel.mode = chan_to_phymode(&ar->hw->conf.chandef);
+
+       arg.channel.min_power = channel->max_power * 3;
+       arg.channel.max_power = channel->max_power * 4;
+       arg.channel.max_reg_power = channel->max_reg_power * 4;
+       arg.channel.max_antenna_gain = channel->max_antenna_gain;
+
+       ret = ath10k_wmi_vdev_start(ar, &arg);
+       if (ret) {
+               ath10k_warn("Monitor vdev start failed: ret %d\n", ret);
+               return ret;
+       }
+
+       ret = ath10k_vdev_setup_sync(ar);
+       if (ret) {
+               ath10k_warn("Monitor vdev setup failed %d\n", ret);
+               return ret;
+       }
+
+       ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
+       if (ret) {
+               ath10k_warn("Monitor vdev up failed: %d\n", ret);
+               goto vdev_stop;
+       }
+
+       ar->monitor_vdev_id = vdev_id;
+       ar->monitor_enabled = true;
+
+       return 0;
+
+vdev_stop:
+       ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
+       if (ret)
+               ath10k_warn("Monitor vdev stop failed: %d\n", ret);
+
+       return ret;
+}
+
+static int ath10k_monitor_stop(struct ath10k *ar)
+{
+       int ret = 0;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       /* For some reason calling ath10k_wmi_vdev_down() here often
+        * makes the subsequent ath10k_wmi_vdev_stop() fail, after which
+        * the monitor vdev cannot be started again without reloading
+        * the driver. Skipping ath10k_wmi_vdev_down() has not shown any
+        * problems, so it is omitted here.
+        */
+
+       ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
+       if (ret)
+               ath10k_warn("Monitor vdev stop failed: %d\n", ret);
+
+       ret = ath10k_vdev_setup_sync(ar);
+       if (ret)
+               ath10k_warn("Monitor_down sync failed: %d\n", ret);
+
+       ar->monitor_enabled = false;
+       return ret;
+}
+
+static int ath10k_monitor_create(struct ath10k *ar)
+{
+       int bit, ret = 0;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       if (ar->monitor_present) {
+               ath10k_warn("Monitor mode already enabled\n");
+               return 0;
+       }
+
+       bit = ffs(ar->free_vdev_map);
+       if (bit == 0) {
+               ath10k_warn("No free VDEV slots\n");
+               return -ENOMEM;
+       }
+
+       ar->monitor_vdev_id = bit - 1;
+       ar->free_vdev_map &= ~(1 << ar->monitor_vdev_id);
+
+       ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id,
+                                    WMI_VDEV_TYPE_MONITOR,
+                                    0, ar->mac_addr);
+       if (ret) {
+               ath10k_warn("WMI vdev monitor create failed: ret %d\n", ret);
+               goto vdev_fail;
+       }
+
+       ath10k_dbg(ATH10K_DBG_MAC, "Monitor interface created, vdev id: %d\n",
+                  ar->monitor_vdev_id);
+
+       ar->monitor_present = true;
+       return 0;
+
+vdev_fail:
+       /*
+        * Restore the ID to the global map.
+        */
+       ar->free_vdev_map |= 1 << (ar->monitor_vdev_id);
+       return ret;
+}
+
+static int ath10k_monitor_destroy(struct ath10k *ar)
+{
+       int ret = 0;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       if (!ar->monitor_present)
+               return 0;
+
+       ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
+       if (ret) {
+               ath10k_warn("WMI vdev monitor delete failed: %d\n", ret);
+               return ret;
+       }
+
+       ar->free_vdev_map |= 1 << (ar->monitor_vdev_id);
+       ar->monitor_present = false;
+
+       ath10k_dbg(ATH10K_DBG_MAC, "Monitor interface destroyed, vdev id: %d\n",
+                  ar->monitor_vdev_id);
+       return ret;
+}
+
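+/* Start or stop the AP vdev according to info->enable_beacon and bring it
+ * up with the configured BSSID.
+ */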
+static void ath10k_control_beaconing(struct ath10k_vif *arvif,
+                               struct ieee80211_bss_conf *info)
+{
+       int ret = 0;
+
+       if (!info->enable_beacon) {
+               ath10k_vdev_stop(arvif);
+               return;
+       }
+
+       arvif->tx_seq_no = 0x1000;
+
+       ret = ath10k_vdev_start(arvif);
+       if (ret)
+               return;
+
+       ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, 0, info->bssid);
+       if (ret) {
+               ath10k_warn("Failed to bring up VDEV: %d\n",
+                           arvif->vdev_id);
+               return;
+       }
+       ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d up\n", arvif->vdev_id);
+}
+
+static void ath10k_control_ibss(struct ath10k_vif *arvif,
+                               struct ieee80211_bss_conf *info,
+                               const u8 self_peer[ETH_ALEN])
+{
+       int ret = 0;
+
+       if (!info->ibss_joined) {
+               ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, self_peer);
+               if (ret)
+                       ath10k_warn("Failed to delete IBSS self peer:%pM for VDEV:%d ret:%d\n",
+                                   self_peer, arvif->vdev_id, ret);
+
+               if (is_zero_ether_addr(arvif->u.ibss.bssid))
+                       return;
+
+               ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id,
+                                        arvif->u.ibss.bssid);
+               if (ret) {
+                       ath10k_warn("Failed to delete IBSS BSSID peer:%pM for VDEV:%d ret:%d\n",
+                                   arvif->u.ibss.bssid, arvif->vdev_id, ret);
+                       return;
+               }
+
+               memset(arvif->u.ibss.bssid, 0, ETH_ALEN);
+
+               return;
+       }
+
+       ret = ath10k_peer_create(arvif->ar, arvif->vdev_id, self_peer);
+       if (ret) {
+               ath10k_warn("Failed to create IBSS self peer:%pM for VDEV:%d ret:%d\n",
+                           self_peer, arvif->vdev_id, ret);
+               return;
+       }
+
+       ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id,
+                                       WMI_VDEV_PARAM_ATIM_WINDOW,
+                                       ATH10K_DEFAULT_ATIM);
+       if (ret)
+               ath10k_warn("Failed to set IBSS ATIM for VDEV:%d ret:%d\n",
+                           arvif->vdev_id, ret);
+}
+
+/*
+ * Review this when mac80211 gains per-interface powersave support.
+ */
+static void ath10k_ps_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+       struct ath10k_generic_iter *ar_iter = data;
+       struct ieee80211_conf *conf = &ar_iter->ar->hw->conf;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       enum wmi_sta_powersave_param param;
+       enum wmi_sta_ps_mode psmode;
+       int ret;
+
+       if (vif->type != NL80211_IFTYPE_STATION)
+               return;
+
+       if (conf->flags & IEEE80211_CONF_PS) {
+               psmode = WMI_STA_PS_MODE_ENABLED;
+               param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
+
+               ret = ath10k_wmi_set_sta_ps_param(ar_iter->ar,
+                                                 arvif->vdev_id,
+                                                 param,
+                                                 conf->dynamic_ps_timeout);
+               if (ret) {
+                       ath10k_warn("Failed to set inactivity time for VDEV: %d\n",
+                                   arvif->vdev_id);
+                       return;
+               }
+
+               ar_iter->ret = ret;
+       } else {
+               psmode = WMI_STA_PS_MODE_DISABLED;
+       }
+
+       ar_iter->ret = ath10k_wmi_set_psmode(ar_iter->ar, arvif->vdev_id,
+                                            psmode);
+       if (ar_iter->ret)
+               ath10k_warn("Failed to set PS Mode: %d for VDEV: %d\n",
+                           psmode, arvif->vdev_id);
+       else
+               ath10k_dbg(ATH10K_DBG_MAC, "Set PS Mode: %d for VDEV: %d\n",
+                          psmode, arvif->vdev_id);
+}
+
+/**********************/
+/* Station management */
+/**********************/
+
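+/* Fill in the basic peer-assoc fields: address, vdev id, AID, listen
+ * interval and, for station vdevs, the association capabilities.
+ */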
+static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
+                                     struct ath10k_vif *arvif,
+                                     struct ieee80211_sta *sta,
+                                     struct ieee80211_bss_conf *bss_conf,
+                                     struct wmi_peer_assoc_complete_arg *arg)
+{
+       memcpy(arg->addr, sta->addr, ETH_ALEN);
+       arg->vdev_id = arvif->vdev_id;
+       arg->peer_aid = sta->aid;
+       arg->peer_flags |= WMI_PEER_AUTH;
+
+       if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
+               /*
+                * The firmware seems to have problems with power save in
+                * STA mode when this parameter is set high (e.g. 5). It
+                * often fails to send a NULL frame (with clean PS flags)
+                * even though the beacons indicate buffered frames, and
+                * sometimes it takes more than 10 seconds to wake up.
+                * Pings from the AP to our device then frequently fail
+                * (more than 50%).
+                *
+                * Setting this parameter to 1 appears to make the firmware
+                * check every beacon and wake up immediately once buffered
+                * data is detected.
+                */
+               arg->peer_listen_intval = 1;
+       else
+               arg->peer_listen_intval = ar->hw->conf.listen_interval;
+
+       arg->peer_num_spatial_streams = 1;
+
+       /*
+        * The assoc capabilities are available only in managed mode.
+        */
+       if (arvif->vdev_type == WMI_VDEV_TYPE_STA && bss_conf)
+               arg->peer_caps = bss_conf->assoc_capability;
+}
+
+static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
+                                      struct ath10k_vif *arvif,
+                                      struct wmi_peer_assoc_complete_arg *arg)
+{
+       struct ieee80211_vif *vif = arvif->vif;
+       struct ieee80211_bss_conf *info = &vif->bss_conf;
+       struct cfg80211_bss *bss;
+       const u8 *rsnie = NULL;
+       const u8 *wpaie = NULL;
+
+       bss = cfg80211_get_bss(ar->hw->wiphy, ar->hw->conf.chandef.chan,
+                              info->bssid, NULL, 0, 0, 0);
+       if (bss) {
+               const struct cfg80211_bss_ies *ies;
+
+               rcu_read_lock();
+               rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN);
+
+               ies = rcu_dereference(bss->ies);
+
+               wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+                               WLAN_OUI_TYPE_MICROSOFT_WPA,
+                               ies->data,
+                               ies->len);
+               rcu_read_unlock();
+               cfg80211_put_bss(ar->hw->wiphy, bss);
+       }
+
+       /* FIXME: is basing this on the RSN/WPA IEs the correct approach? */
+       if (rsnie || wpaie) {
+               ath10k_dbg(ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__);
+               arg->peer_flags |= WMI_PEER_NEED_PTK_4_WAY;
+       }
+
+       if (wpaie) {
+               ath10k_dbg(ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__);
+               arg->peer_flags |= WMI_PEER_NEED_GTK_2_WAY;
+       }
+}
+
+static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
+                                     struct ieee80211_sta *sta,
+                                     struct wmi_peer_assoc_complete_arg *arg)
+{
+       struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
+       const struct ieee80211_supported_band *sband;
+       const struct ieee80211_rate *rates;
+       u32 ratemask;
+       int i;
+
+       sband = ar->hw->wiphy->bands[ar->hw->conf.chandef.chan->band];
+       ratemask = sta->supp_rates[ar->hw->conf.chandef.chan->band];
+       rates = sband->bitrates;
+
+       rateset->num_rates = 0;
+
+       for (i = 0; i < 32; i++, ratemask >>= 1, rates++) {
+               if (!(ratemask & 1))
+                       continue;
+
+               rateset->rates[rateset->num_rates] = rates->hw_value;
+               rateset->num_rates++;
+       }
+}
+
+static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
+                                  struct ieee80211_sta *sta,
+                                  struct wmi_peer_assoc_complete_arg *arg)
+{
+       const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+       int smps;
+       int i, n;
+
+       if (!ht_cap->ht_supported)
+               return;
+
+       arg->peer_flags |= WMI_PEER_HT;
+       arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+                                   ht_cap->ampdu_factor)) - 1;
+
+       arg->peer_mpdu_density =
+               ath10k_parse_mpdudensity(ht_cap->ampdu_density);
+
+       arg->peer_ht_caps = ht_cap->cap;
+       arg->peer_rate_caps |= WMI_RC_HT_FLAG;
+
+       if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
+               arg->peer_flags |= WMI_PEER_LDPC;
+
+       if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
+               arg->peer_flags |= WMI_PEER_40MHZ;
+               arg->peer_rate_caps |= WMI_RC_CW40_FLAG;
+       }
+
+       if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
+               arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
+
+       if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
+               arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
+
+       if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
+               arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG;
+               arg->peer_flags |= WMI_PEER_STBC;
+       }
+
+       if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) {
+               u32 stbc;
+               stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC;
+               stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT;
+               stbc = stbc << WMI_RC_RX_STBC_FLAG_S;
+               arg->peer_rate_caps |= stbc;
+               arg->peer_flags |= WMI_PEER_STBC;
+       }
+
+       smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
+       smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+       if (smps == WLAN_HT_CAP_SM_PS_STATIC) {
+               arg->peer_flags |= WMI_PEER_SPATIAL_MUX;
+               arg->peer_flags |= WMI_PEER_STATIC_MIMOPS;
+       } else if (smps == WLAN_HT_CAP_SM_PS_DYNAMIC) {
+               arg->peer_flags |= WMI_PEER_SPATIAL_MUX;
+               arg->peer_flags |= WMI_PEER_DYN_MIMOPS;
+       }
+
+       if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])
+               arg->peer_rate_caps |= WMI_RC_TS_FLAG;
+       else if (ht_cap->mcs.rx_mask[1])
+               arg->peer_rate_caps |= WMI_RC_DS_FLAG;
+
+       for (i = 0, n = 0; i < IEEE80211_HT_MCS_MASK_LEN*8; i++)
+               if (ht_cap->mcs.rx_mask[i/8] & (1 << i%8))
+                       arg->peer_ht_rates.rates[n++] = i;
+
+       arg->peer_ht_rates.num_rates = n;
+       arg->peer_num_spatial_streams = max((n+7) / 8, 1);
+
+       ath10k_dbg(ATH10K_DBG_MAC, "mcs cnt %d nss %d\n",
+                  arg->peer_ht_rates.num_rates,
+                  arg->peer_num_spatial_streams);
+}
+
+static void ath10k_peer_assoc_h_qos_ap(struct ath10k *ar,
+                                      struct ath10k_vif *arvif,
+                                      struct ieee80211_sta *sta,
+                                      struct ieee80211_bss_conf *bss_conf,
+                                      struct wmi_peer_assoc_complete_arg *arg)
+{
+       u32 uapsd = 0;
+       u32 max_sp = 0;
+
+       if (sta->wme)
+               arg->peer_flags |= WMI_PEER_QOS;
+
+       if (sta->wme && sta->uapsd_queues) {
+               ath10k_dbg(ATH10K_DBG_MAC, "uapsd_queues: 0x%X, max_sp: %d\n",
+                          sta->uapsd_queues, sta->max_sp);
+
+               arg->peer_flags |= WMI_PEER_APSD;
+               arg->peer_flags |= WMI_RC_UAPSD_FLAG;
+
+               if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+                       uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
+                                WMI_AP_PS_UAPSD_AC3_TRIGGER_EN;
+               if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
+                       uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN |
+                                WMI_AP_PS_UAPSD_AC2_TRIGGER_EN;
+               if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
+                       uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN |
+                                WMI_AP_PS_UAPSD_AC1_TRIGGER_EN;
+               if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+                       uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN |
+                                WMI_AP_PS_UAPSD_AC0_TRIGGER_EN;
+
+               if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)
+                       max_sp = sta->max_sp;
+
+               ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
+                                          sta->addr,
+                                          WMI_AP_PS_PEER_PARAM_UAPSD,
+                                          uapsd);
+
+               ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
+                                          sta->addr,
+                                          WMI_AP_PS_PEER_PARAM_MAX_SP,
+                                          max_sp);
+
+               /* TODO: set this based on the STA listen interval and the
+                * beacon interval. We don't currently know
+                * sta->listen_interval (a mac80211 patch is required), so
+                * use 10 seconds for now. */
+               ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
+                                          sta->addr,
+                                          WMI_AP_PS_PEER_PARAM_AGEOUT_TIME,
+                                          10);
+       }
+}
+
+static void ath10k_peer_assoc_h_qos_sta(struct ath10k *ar,
+                                       struct ath10k_vif *arvif,
+                                       struct ieee80211_sta *sta,
+                                       struct ieee80211_bss_conf *bss_conf,
+                                       struct wmi_peer_assoc_complete_arg *arg)
+{
+       if (bss_conf->qos)
+               arg->peer_flags |= WMI_PEER_QOS;
+}
+
+static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
+                                   struct ieee80211_sta *sta,
+                                   struct wmi_peer_assoc_complete_arg *arg)
+{
+       const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+
+       if (!vht_cap->vht_supported)
+               return;
+
+       arg->peer_flags |= WMI_PEER_VHT;
+
+       arg->peer_vht_caps = vht_cap->cap;
+
+       if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+               arg->peer_flags |= WMI_PEER_80MHZ;
+
+       arg->peer_vht_rates.rx_max_rate =
+               __le16_to_cpu(vht_cap->vht_mcs.rx_highest);
+       arg->peer_vht_rates.rx_mcs_set =
+               __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
+       arg->peer_vht_rates.tx_max_rate =
+               __le16_to_cpu(vht_cap->vht_mcs.tx_highest);
+       arg->peer_vht_rates.tx_mcs_set =
+               __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map);
+
+       ath10k_dbg(ATH10K_DBG_MAC, "mac vht peer\n");
+}
+
+static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
+                                   struct ath10k_vif *arvif,
+                                   struct ieee80211_sta *sta,
+                                   struct ieee80211_bss_conf *bss_conf,
+                                   struct wmi_peer_assoc_complete_arg *arg)
+{
+       switch (arvif->vdev_type) {
+       case WMI_VDEV_TYPE_AP:
+               ath10k_peer_assoc_h_qos_ap(ar, arvif, sta, bss_conf, arg);
+               break;
+       case WMI_VDEV_TYPE_STA:
+               ath10k_peer_assoc_h_qos_sta(ar, arvif, sta, bss_conf, arg);
+               break;
+       default:
+               break;
+       }
+}
+
+static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
+                                       struct ath10k_vif *arvif,
+                                       struct ieee80211_sta *sta,
+                                       struct wmi_peer_assoc_complete_arg *arg)
+{
+       enum wmi_phy_mode phymode = MODE_UNKNOWN;
+
+       /* FIXME: add VHT */
+
+       switch (ar->hw->conf.chandef.chan->band) {
+       case IEEE80211_BAND_2GHZ:
+               if (sta->ht_cap.ht_supported) {
+                       if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+                               phymode = MODE_11NG_HT40;
+                       else
+                               phymode = MODE_11NG_HT20;
+               } else {
+                       phymode = MODE_11G;
+               }
+
+               break;
+       case IEEE80211_BAND_5GHZ:
+               if (sta->ht_cap.ht_supported) {
+                       if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+                               phymode = MODE_11NA_HT40;
+                       else
+                               phymode = MODE_11NA_HT20;
+               } else {
+                       phymode = MODE_11A;
+               }
+
+               break;
+       default:
+               break;
+       }
+
+       arg->peer_phymode = phymode;
+       WARN_ON(phymode == MODE_UNKNOWN);
+}
+
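+/* Collect the output of all the peer-assoc helpers above into a single
+ * WMI peer-assoc command and send it to the firmware.
+ */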
+static int ath10k_peer_assoc(struct ath10k *ar,
+                            struct ath10k_vif *arvif,
+                            struct ieee80211_sta *sta,
+                            struct ieee80211_bss_conf *bss_conf)
+{
+       struct wmi_peer_assoc_complete_arg arg;
+
+       memset(&arg, 0, sizeof(struct wmi_peer_assoc_complete_arg));
+
+       ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, &arg);
+       ath10k_peer_assoc_h_crypto(ar, arvif, &arg);
+       ath10k_peer_assoc_h_rates(ar, sta, &arg);
+       ath10k_peer_assoc_h_ht(ar, sta, &arg);
+       ath10k_peer_assoc_h_vht(ar, sta, &arg);
+       ath10k_peer_assoc_h_qos(ar, arvif, sta, bss_conf, &arg);
+       ath10k_peer_assoc_h_phymode(ar, arvif, sta, &arg);
+
+       return ath10k_wmi_peer_assoc(ar, &arg);
+}
+
+/* can be called only in mac80211 callbacks due to `key_count` usage */
+static void ath10k_bss_assoc(struct ieee80211_hw *hw,
+                            struct ieee80211_vif *vif,
+                            struct ieee80211_bss_conf *bss_conf)
+{
+       struct ath10k *ar = hw->priv;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ieee80211_sta *ap_sta;
+       int ret;
+
+       rcu_read_lock();
+
+       ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
+       if (!ap_sta) {
+               ath10k_warn("Failed to find station entry for %pM\n",
+                           bss_conf->bssid);
+               rcu_read_unlock();
+               return;
+       }
+
+       ret = ath10k_peer_assoc(ar, arvif, ap_sta, bss_conf);
+       if (ret) {
+               ath10k_warn("Peer assoc failed for %pM\n", bss_conf->bssid);
+               rcu_read_unlock();
+               return;
+       }
+
+       rcu_read_unlock();
+
+       ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, bss_conf->aid,
+                                bss_conf->bssid);
+       if (ret)
+               ath10k_warn("VDEV: %d up failed: ret %d\n",
+                           arvif->vdev_id, ret);
+       else
+               ath10k_dbg(ATH10K_DBG_MAC,
+                          "VDEV: %d associated, BSSID: %pM, AID: %d\n",
+                          arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
+}
+
+/*
+ * FIXME: flush TIDs
+ */
+static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
+                               struct ieee80211_vif *vif)
+{
+       struct ath10k *ar = hw->priv;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       int ret;
+
+       /*
+        * For some reason, calling VDEV-DOWN before VDEV-STOP makes the
+        * FW send frames via HTT after disassociation. It is unclear why,
+        * given that VDEV-DOWN is supposed to be analogous to link down,
+        * so just stop the VDEV first.
+        */
+       ret = ath10k_vdev_stop(arvif);
+       if (!ret)
+               ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d stopped\n",
+                          arvif->vdev_id);
+
+       /*
+        * If we don't call VDEV-DOWN after VDEV-STOP FW will remain active and
+        * report beacons from previously associated network through HTT.
+        * This in turn would spam mac80211 WARN_ON if we bring down all
+        * interfaces as it expects there is no rx when no interface is
+        * running.
+        */
+       ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+       if (ret)
+               ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d ath10k_wmi_vdev_down failed (%d)\n",
+                          arvif->vdev_id, ret);
+
+       ath10k_wmi_flush_tx(ar);
+
+       arvif->def_wep_key_index = 0;
+}
+
+static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
+                               struct ieee80211_sta *sta)
+{
+       int ret = 0;
+
+       ret = ath10k_peer_assoc(ar, arvif, sta, NULL);
+       if (ret) {
+               ath10k_warn("WMI peer assoc failed for %pM\n", sta->addr);
+               return ret;
+       }
+
+       ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
+       if (ret) {
+               ath10k_warn("could not install peer wep keys (%d)\n", ret);
+               return ret;
+       }
+
+       return ret;
+}
+
+static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif,
+                                  struct ieee80211_sta *sta)
+{
+       int ret = 0;
+
+       ret = ath10k_clear_peer_keys(arvif, sta->addr);
+       if (ret) {
+               ath10k_warn("could not clear all peer wep keys (%d)\n", ret);
+               return ret;
+       }
+
+       return ret;
+}
+
+/**************/
+/* Regulatory */
+/**************/
+
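+/* Build the list of all enabled channels known to cfg80211 and push it to
+ * the firmware as a WMI scan channel list.
+ */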
+static int ath10k_update_channel_list(struct ath10k *ar)
+{
+       struct ieee80211_hw *hw = ar->hw;
+       struct ieee80211_supported_band **bands;
+       enum ieee80211_band band;
+       struct ieee80211_channel *channel;
+       struct wmi_scan_chan_list_arg arg = {0};
+       struct wmi_channel_arg *ch;
+       bool passive;
+       int len;
+       int ret;
+       int i;
+
+       bands = hw->wiphy->bands;
+       for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+               if (!bands[band])
+                       continue;
+
+               for (i = 0; i < bands[band]->n_channels; i++) {
+                       if (bands[band]->channels[i].flags &
+                           IEEE80211_CHAN_DISABLED)
+                               continue;
+
+                       arg.n_channels++;
+               }
+       }
+
+       len = sizeof(struct wmi_channel_arg) * arg.n_channels;
+       arg.channels = kzalloc(len, GFP_KERNEL);
+       if (!arg.channels)
+               return -ENOMEM;
+
+       ch = arg.channels;
+       for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+               if (!bands[band])
+                       continue;
+
+               for (i = 0; i < bands[band]->n_channels; i++) {
+                       channel = &bands[band]->channels[i];
+
+                       if (channel->flags & IEEE80211_CHAN_DISABLED)
+                               continue;
+
+                       ch->allow_ht   = true;
+
+                       /* FIXME: when should we really allow VHT? */
+                       ch->allow_vht = true;
+
+                       ch->allow_ibss =
+                               !(channel->flags & IEEE80211_CHAN_NO_IBSS);
+
+                       ch->ht40plus =
+                               !(channel->flags & IEEE80211_CHAN_NO_HT40PLUS);
+
+                       passive = channel->flags & IEEE80211_CHAN_PASSIVE_SCAN;
+                       ch->passive = passive;
+
+                       ch->freq = channel->center_freq;
+                       ch->min_power = channel->max_power * 3;
+                       ch->max_power = channel->max_power * 4;
+                       ch->max_reg_power = channel->max_reg_power * 4;
+                       ch->max_antenna_gain = channel->max_antenna_gain;
+                       ch->reg_class_id = 0; /* FIXME */
+
+                       /* FIXME: why use only legacy modes, why not any
+                        * HT/VHT modes? Would that even make any
+                        * difference? */
+                       if (channel->band == IEEE80211_BAND_2GHZ)
+                               ch->mode = MODE_11G;
+                       else
+                               ch->mode = MODE_11A;
+
+                       if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN))
+                               continue;
+
+                       ath10k_dbg(ATH10K_DBG_WMI,
+                                  "%s: [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
+                                  __func__, ch - arg.channels, arg.n_channels,
+                                  ch->freq, ch->max_power, ch->max_reg_power,
+                                  ch->max_antenna_gain, ch->mode);
+
+                       ch++;
+               }
+       }
+
+       ret = ath10k_wmi_scan_chan_list(ar, &arg);
+       kfree(arg.channels);
+
+       return ret;
+}
+
+static void ath10k_reg_notifier(struct wiphy *wiphy,
+                               struct regulatory_request *request)
+{
+       struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+       struct reg_dmn_pair_mapping *regpair;
+       struct ath10k *ar = hw->priv;
+       int ret;
+
+       ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
+
+       ret = ath10k_update_channel_list(ar);
+       if (ret)
+               ath10k_warn("could not update channel list (%d)\n", ret);
+
+       regpair = ar->ath_common.regulatory.regpair;
+       /* Target allows setting up a per-band regdomain, but ath_common
+        * provides only a combined one */
+       ret = ath10k_wmi_pdev_set_regdomain(ar,
+                                           regpair->regDmnEnum,
+                                           regpair->regDmnEnum, /* 2ghz */
+                                           regpair->regDmnEnum, /* 5ghz */
+                                           regpair->reg_2ghz_ctl,
+                                           regpair->reg_5ghz_ctl);
+       if (ret)
+               ath10k_warn("could not set pdev regdomain (%d)\n", ret);
+}
+
+/***************/
+/* TX handlers */
+/***************/
+
+/*
+ * Frames sent to the FW have to be in "Native Wifi" format.
+ * Strip the QoS field from the 802.11 header.
+ */
+static void ath10k_tx_h_qos_workaround(struct ieee80211_hw *hw,
+                                      struct ieee80211_tx_control *control,
+                                      struct sk_buff *skb)
+{
+       struct ieee80211_hdr *hdr = (void *)skb->data;
+       u8 *qos_ctl;
+
+       if (!ieee80211_is_data_qos(hdr->frame_control))
+               return;
+
+       qos_ctl = ieee80211_get_qos_ctl(hdr);
+       memmove(qos_ctl, qos_ctl + IEEE80211_QOS_CTL_LEN,
+               skb->len - ieee80211_hdrlen(hdr->frame_control));
+       skb_trim(skb, skb->len - IEEE80211_QOS_CTL_LEN);
+}
+
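+/* If a protected frame is about to go out with a WEP key other than the
+ * current vdev default, update WMI_VDEV_PARAM_DEF_KEYID first. Only STA
+ * mode is handled for now.
+ */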
+static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
+{
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_vif *vif = info->control.vif;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k *ar = arvif->ar;
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct ieee80211_key_conf *key = info->control.hw_key;
+       int ret;
+
+       /* TODO AP mode should be implemented */
+       if (vif->type != NL80211_IFTYPE_STATION)
+               return;
+
+       if (!ieee80211_has_protected(hdr->frame_control))
+               return;
+
+       if (!key)
+               return;
+
+       if (key->cipher != WLAN_CIPHER_SUITE_WEP40 &&
+           key->cipher != WLAN_CIPHER_SUITE_WEP104)
+               return;
+
+       if (key->keyidx == arvif->def_wep_key_index)
+               return;
+
+       ath10k_dbg(ATH10K_DBG_MAC, "new wep keyidx will be %d\n", key->keyidx);
+
+       ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+                                       WMI_VDEV_PARAM_DEF_KEYID,
+                                       key->keyidx);
+       if (ret) {
+               ath10k_warn("could not update wep keyidx (%d)\n", ret);
+               return;
+       }
+
+       arvif->def_wep_key_index = key->keyidx;
+}
+
+static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, struct sk_buff *skb)
+{
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_vif *vif = info->control.vif;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+       /* This applies only to the P2P_GO case */
+       if (arvif->vdev_type != WMI_VDEV_TYPE_AP ||
+           arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
+               return;
+
+       if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) {
+               spin_lock_bh(&ar->data_lock);
+               if (arvif->u.ap.noa_data)
+                       if (!pskb_expand_head(skb, 0, arvif->u.ap.noa_len,
+                                             GFP_ATOMIC))
+                               memcpy(skb_put(skb, arvif->u.ap.noa_len),
+                                      arvif->u.ap.noa_data,
+                                      arvif->u.ap.noa_len);
+               spin_unlock_bh(&ar->data_lock);
+       }
+}
+
+static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
+{
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       int ret;
+
+       if (ieee80211_is_mgmt(hdr->frame_control))
+               ret = ath10k_htt_mgmt_tx(ar->htt, skb);
+       else if (ieee80211_is_nullfunc(hdr->frame_control))
+               /* FW does not report tx status properly for NullFunc frames
+                * unless they are sent through mgmt tx path. mac80211 sends
+                * those frames when it detects link/beacon loss and depends on
+                * the tx status to be correct. */
+               ret = ath10k_htt_mgmt_tx(ar->htt, skb);
+       else
+               ret = ath10k_htt_tx(ar->htt, skb);
+
+       if (ret) {
+               ath10k_warn("tx failed (%d). dropping packet.\n", ret);
+               ieee80211_free_txskb(ar->hw, skb);
+       }
+}
+
+void ath10k_offchan_tx_purge(struct ath10k *ar)
+{
+       struct sk_buff *skb;
+
+       for (;;) {
+               skb = skb_dequeue(&ar->offchan_tx_queue);
+               if (!skb)
+                       break;
+
+               ieee80211_free_txskb(ar->hw, skb);
+       }
+}
+
+void ath10k_offchan_tx_work(struct work_struct *work)
+{
+       struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work);
+       struct ath10k_peer *peer;
+       struct ieee80211_hdr *hdr;
+       struct sk_buff *skb;
+       const u8 *peer_addr;
+       int vdev_id;
+       int ret;
+
+       /* FW requirement: We must create a peer before FW will send out
+        * an offchannel frame. Otherwise the frame will be stuck and
+        * never transmitted. We delete the peer upon tx completion.
+        * It is unlikely that a peer for offchannel tx will already be
+        * present. However it may be in some rare cases so account for that.
+        * Otherwise we might remove a legitimate peer and break stuff. */
+
+       for (;;) {
+               skb = skb_dequeue(&ar->offchan_tx_queue);
+               if (!skb)
+                       break;
+
+               mutex_lock(&ar->conf_mutex);
+
+               ath10k_dbg(ATH10K_DBG_MAC, "processing offchannel skb %p\n",
+                          skb);
+
+               hdr = (struct ieee80211_hdr *)skb->data;
+               peer_addr = ieee80211_get_DA(hdr);
+               vdev_id = ATH10K_SKB_CB(skb)->htt.vdev_id;
+
+               spin_lock_bh(&ar->data_lock);
+               peer = ath10k_peer_find(ar, vdev_id, peer_addr);
+               spin_unlock_bh(&ar->data_lock);
+
+               if (peer)
+                       ath10k_dbg(ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n",
+                                  peer_addr, vdev_id);
+
+               if (!peer) {
+                       ret = ath10k_peer_create(ar, vdev_id, peer_addr);
+                       if (ret)
+                               ath10k_warn("peer %pM on vdev %d not created (%d)\n",
+                                           peer_addr, vdev_id, ret);
+               }
+
+               spin_lock_bh(&ar->data_lock);
+               INIT_COMPLETION(ar->offchan_tx_completed);
+               ar->offchan_tx_skb = skb;
+               spin_unlock_bh(&ar->data_lock);
+
+               ath10k_tx_htt(ar, skb);
+
+               ret = wait_for_completion_timeout(&ar->offchan_tx_completed,
+                                                 3 * HZ);
+               if (ret <= 0)
+                       ath10k_warn("timed out waiting for offchannel skb %p\n",
+                                   skb);
+
+               if (!peer) {
+                       ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
+                       if (ret)
+                               ath10k_warn("peer %pM on vdev %d not deleted (%d)\n",
+                                           peer_addr, vdev_id, ret);
+               }
+
+               mutex_unlock(&ar->conf_mutex);
+       }
+}
+
+/************/
+/* Scanning */
+/************/
+
+/*
+ * This gets called if we don't get a heartbeat during a scan.
+ * This may indicate the FW has hung and we need to abort the
+ * scan manually to prevent cancel_hw_scan() from deadlocking.
+ */
+void ath10k_reset_scan(unsigned long ptr)
+{
+       struct ath10k *ar = (struct ath10k *)ptr;
+
+       spin_lock_bh(&ar->data_lock);
+       if (!ar->scan.in_progress) {
+               spin_unlock_bh(&ar->data_lock);
+               return;
+       }
+
+       ath10k_warn("scan timeout. resetting. fw issue?\n");
+
+       if (ar->scan.is_roc)
+               ieee80211_remain_on_channel_expired(ar->hw);
+       else
+               ieee80211_scan_completed(ar->hw, 1 /* aborted */);
+
+       ar->scan.in_progress = false;
+       complete_all(&ar->scan.completed);
+       spin_unlock_bh(&ar->data_lock);
+}
+
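+/* Tell the firmware to stop the current scan and wait for the completion
+ * event. If it never arrives, force the host scan state back to idle and
+ * return -ETIMEDOUT.
+ */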
+static int ath10k_abort_scan(struct ath10k *ar)
+{
+       struct wmi_stop_scan_arg arg = {
+               .req_id = 1, /* FIXME */
+               .req_type = WMI_SCAN_STOP_ONE,
+               .u.scan_id = ATH10K_SCAN_ID,
+       };
+       int ret;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       del_timer_sync(&ar->scan.timeout);
+
+       spin_lock_bh(&ar->data_lock);
+       if (!ar->scan.in_progress) {
+               spin_unlock_bh(&ar->data_lock);
+               return 0;
+       }
+
+       ar->scan.aborting = true;
+       spin_unlock_bh(&ar->data_lock);
+
+       ret = ath10k_wmi_stop_scan(ar, &arg);
+       if (ret) {
+               ath10k_warn("could not submit wmi stop scan (%d)\n", ret);
+               return -EIO;
+       }
+
+       ath10k_wmi_flush_tx(ar);
+
+       ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ);
+       if (ret == 0)
+               ath10k_warn("timed out while waiting for scan to stop\n");
+
+       /* Scan completion may arrive right after we time out here, so
+        * check in_progress and tell mac80211 the scan is completed
+        * ourselves. If we don't and the FW fails to send the scan
+        * completion indication, userspace won't be able to scan anymore. */
+       ret = 0;
+
+       spin_lock_bh(&ar->data_lock);
+       if (ar->scan.in_progress) {
+               ath10k_warn("could not stop scan. its still in progress\n");
+               ar->scan.in_progress = false;
+               ath10k_offchan_tx_purge(ar);
+               ret = -ETIMEDOUT;
+       }
+       spin_unlock_bh(&ar->data_lock);
+
+       return ret;
+}
+
+static int ath10k_start_scan(struct ath10k *ar,
+                            const struct wmi_start_scan_arg *arg)
+{
+       int ret;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       ret = ath10k_wmi_start_scan(ar, arg);
+       if (ret)
+               return ret;
+
+       /* make sure we submit the command so the completion
+        * timeout makes sense */
+       ath10k_wmi_flush_tx(ar);
+
+       ret = wait_for_completion_timeout(&ar->scan.started, 1*HZ);
+       if (ret == 0) {
+               ath10k_abort_scan(ar);
+               return ret;
+       }
+
+       /* The scan can complete earlier, before we even start the timer.
+        * In that case the timer handler checks ar->scan.in_progress and
+        * bails out if it is false. Add a 200ms margin to account for
+        * event/command processing. */
+       mod_timer(&ar->scan.timeout, jiffies +
+                 msecs_to_jiffies(arg->max_scan_time+200));
+       return 0;
+}
+
+/**********************/
+/* mac80211 callbacks */
+/**********************/
+
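+/* mac80211 .tx callback: pick the target vdev and TID, apply the
+ * native-wifi, WEP keyidx and P2P NoA workarounds, then hand the frame to
+ * HTT. Off-channel frames are queued for the off-channel worker instead.
+ */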
+static void ath10k_tx(struct ieee80211_hw *hw,
+                     struct ieee80211_tx_control *control,
+                     struct sk_buff *skb)
+{
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct ath10k *ar = hw->priv;
+       struct ath10k_vif *arvif = NULL;
+       u32 vdev_id = 0;
+       u8 tid;
+
+       if (info->control.vif) {
+               arvif = ath10k_vif_to_arvif(info->control.vif);
+               vdev_id = arvif->vdev_id;
+       } else if (ar->monitor_enabled) {
+               vdev_id = ar->monitor_vdev_id;
+       }
+
+       /* We should disable CCK RATE due to P2P */
+       if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
+               ath10k_dbg(ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
+
+       /* we must calculate the tid before applying the qos workaround,
+        * as it strips the qos control field */
+       tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+       if (ieee80211_is_data_qos(hdr->frame_control) &&
+           is_unicast_ether_addr(ieee80211_get_DA(hdr))) {
+               u8 *qc = ieee80211_get_qos_ctl(hdr);
+               tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+       }
+
+       ath10k_tx_h_qos_workaround(hw, control, skb);
+       ath10k_tx_h_update_wep_key(skb);
+       ath10k_tx_h_add_p2p_noa_ie(ar, skb);
+       ath10k_tx_h_seq_no(skb);
+
+       memset(ATH10K_SKB_CB(skb), 0, sizeof(*ATH10K_SKB_CB(skb)));
+       ATH10K_SKB_CB(skb)->htt.vdev_id = vdev_id;
+       ATH10K_SKB_CB(skb)->htt.tid = tid;
+
+       if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
+               spin_lock_bh(&ar->data_lock);
+               ATH10K_SKB_CB(skb)->htt.is_offchan = true;
+               ATH10K_SKB_CB(skb)->htt.vdev_id = ar->scan.vdev_id;
+               spin_unlock_bh(&ar->data_lock);
+
+               ath10k_dbg(ATH10K_DBG_MAC, "queued offchannel skb %p\n", skb);
+
+               skb_queue_tail(&ar->offchan_tx_queue, skb);
+               ieee80211_queue_work(hw, &ar->offchan_tx_work);
+               return;
+       }
+
+       ath10k_tx_htt(ar, skb);
+}
+
+/*
+ * Initialize various parameters with default values.
+ */
+static int ath10k_start(struct ieee80211_hw *hw)
+{
+       struct ath10k *ar = hw->priv;
+       int ret;
+
+       ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS, 1);
+       if (ret)
+               ath10k_warn("could not enable WMI_PDEV_PARAM_PMF_QOS (%d)\n",
+                           ret);
+
+       ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_DYNAMIC_BW, 0);
+       if (ret)
+               ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n",
+                           ret);
+
+       return 0;
+}
+
+static void ath10k_stop(struct ieee80211_hw *hw)
+{
+       struct ath10k *ar = hw->priv;
+
+       /* avoid leaks in case FW never confirms scan for offchannel */
+       cancel_work_sync(&ar->offchan_tx_work);
+       ath10k_offchan_tx_purge(ar);
+}
+
+static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
+{
+       struct ath10k_generic_iter ar_iter;
+       struct ath10k *ar = hw->priv;
+       struct ieee80211_conf *conf = &hw->conf;
+       int ret = 0;
+       u32 flags;
+
+       mutex_lock(&ar->conf_mutex);
+
+       if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+               ath10k_dbg(ATH10K_DBG_MAC, "Config channel %d mhz\n",
+                          conf->chandef.chan->center_freq);
+               spin_lock_bh(&ar->data_lock);
+               ar->rx_channel = conf->chandef.chan;
+               spin_unlock_bh(&ar->data_lock);
+       }
+
+       if (changed & IEEE80211_CONF_CHANGE_PS) {
+               memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
+               ar_iter.ar = ar;
+               flags = IEEE80211_IFACE_ITER_RESUME_ALL;
+
+               ieee80211_iterate_active_interfaces_atomic(hw,
+                                                          flags,
+                                                          ath10k_ps_iter,
+                                                          &ar_iter);
+
+               ret = ar_iter.ret;
+       }
+
+       if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+               if (conf->flags & IEEE80211_CONF_MONITOR)
+                       ret = ath10k_monitor_create(ar);
+               else
+                       ret = ath10k_monitor_destroy(ar);
+       }
+
+       mutex_unlock(&ar->conf_mutex);
+       return ret;
+}
+
+/*
+ * TODO:
+ * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE,
+ * because we will send mgmt frames without CCK. This requirement
+ * for P2P_FIND/GO_NEG should be handled by checking CCK flag
+ * in the TX packet.
+ */
+static int ath10k_add_interface(struct ieee80211_hw *hw,
+                               struct ieee80211_vif *vif)
+{
+       struct ath10k *ar = hw->priv;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       enum wmi_sta_powersave_param param;
+       int ret = 0;
+       u32 value;
+       int bit;
+
+       mutex_lock(&ar->conf_mutex);
+
+       arvif->ar = ar;
+       arvif->vif = vif;
+
+       if ((vif->type == NL80211_IFTYPE_MONITOR) && ar->monitor_present) {
+               ath10k_warn("Only one monitor interface allowed\n");
+               ret = -EBUSY;
+               goto exit;
+       }
+
+       bit = ffs(ar->free_vdev_map);
+       if (bit == 0) {
+               ret = -EBUSY;
+               goto exit;
+       }
+
+       arvif->vdev_id = bit - 1;
+       arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
+       ar->free_vdev_map &= ~(1 << arvif->vdev_id);
+
+       if (ar->p2p)
+               arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE;
+
+       switch (vif->type) {
+       case NL80211_IFTYPE_UNSPECIFIED:
+       case NL80211_IFTYPE_STATION:
+               arvif->vdev_type = WMI_VDEV_TYPE_STA;
+               if (vif->p2p)
+                       arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_CLIENT;
+               break;
+       case NL80211_IFTYPE_ADHOC:
+               arvif->vdev_type = WMI_VDEV_TYPE_IBSS;
+               break;
+       case NL80211_IFTYPE_AP:
+               arvif->vdev_type = WMI_VDEV_TYPE_AP;
+
+               if (vif->p2p)
+                       arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_GO;
+               break;
+       case NL80211_IFTYPE_MONITOR:
+               arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
+               break;
+       default:
+               WARN_ON(1);
+               break;
+       }
+
+       ath10k_dbg(ATH10K_DBG_MAC, "Add interface: id %d type %d subtype %d\n",
+                  arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype);
+
+       ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
+                                    arvif->vdev_subtype, vif->addr);
+       if (ret) {
+               ath10k_warn("WMI vdev create failed: ret %d\n", ret);
+               goto exit;
+       }
+
+       ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+                                       WMI_VDEV_PARAM_DEF_KEYID,
+                                       arvif->def_wep_key_index);
+       if (ret)
+               ath10k_warn("Failed to set default keyid: %d\n", ret);
+
+       ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+                                       WMI_VDEV_PARAM_TX_ENCAP_TYPE,
+                                       ATH10K_HW_TXRX_NATIVE_WIFI);
+       if (ret)
+               ath10k_warn("Failed to set TX encap: %d\n", ret);
+
+       if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+               ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr);
+               if (ret) {
+                       ath10k_warn("Failed to create peer for AP: %d\n", ret);
+                       goto exit;
+               }
+       }
+
+       if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
+               param = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
+               value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
+               ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+                                                 param, value);
+               if (ret)
+                       ath10k_warn("Failed to set RX wake policy: %d\n", ret);
+
+               param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
+               value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
+               ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+                                                 param, value);
+               if (ret)
+                       ath10k_warn("Failed to set TX wake thresh: %d\n", ret);
+
+               param = WMI_STA_PS_PARAM_PSPOLL_COUNT;
+               value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
+               ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+                                                 param, value);
+               if (ret)
+                       ath10k_warn("Failed to set PSPOLL count: %d\n", ret);
+       }
+
+       if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
+               ar->monitor_present = true;
+
+exit:
+       mutex_unlock(&ar->conf_mutex);
+       return ret;
+}
+
+static void ath10k_remove_interface(struct ieee80211_hw *hw,
+                                   struct ieee80211_vif *vif)
+{
+       struct ath10k *ar = hw->priv;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       int ret;
+
+       mutex_lock(&ar->conf_mutex);
+
+       ath10k_dbg(ATH10K_DBG_MAC, "Remove interface: id %d\n", arvif->vdev_id);
+
+       ar->free_vdev_map |= 1 << (arvif->vdev_id);
+
+       if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+               ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr);
+               if (ret)
+                       ath10k_warn("Failed to remove peer for AP: %d\n", ret);
+
+               kfree(arvif->u.ap.noa_data);
+       }
+
+       ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
+       if (ret)
+               ath10k_warn("WMI vdev delete failed: %d\n", ret);
+
+       if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
+               ar->monitor_present = false;
+
+       ath10k_peer_cleanup(ar, arvif->vdev_id);
+
+       mutex_unlock(&ar->conf_mutex);
+}
+
+/*
+ * FIXME: Has to be verified.
+ */
+#define SUPPORTED_FILTERS                      \
+       (FIF_PROMISC_IN_BSS |                   \
+       FIF_ALLMULTI |                          \
+       FIF_CONTROL |                           \
+       FIF_PSPOLL |                            \
+       FIF_OTHER_BSS |                         \
+       FIF_BCN_PRBRESP_PROMISC |               \
+       FIF_PROBE_REQ |                         \
+       FIF_FCSFAIL)
+
+static void ath10k_configure_filter(struct ieee80211_hw *hw,
+                                   unsigned int changed_flags,
+                                   unsigned int *total_flags,
+                                   u64 multicast)
+{
+       struct ath10k *ar = hw->priv;
+       int ret;
+
+       mutex_lock(&ar->conf_mutex);
+
+       changed_flags &= SUPPORTED_FILTERS;
+       *total_flags &= SUPPORTED_FILTERS;
+       ar->filter_flags = *total_flags;
+
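+       /* Promiscuous RX within the BSS is implemented by starting or
+        * stopping the monitor vdev.
+        */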
+       if ((ar->filter_flags & FIF_PROMISC_IN_BSS) &&
+           !ar->monitor_enabled) {
+               ret = ath10k_monitor_start(ar, ar->monitor_vdev_id);
+               if (ret)
+                       ath10k_warn("Unable to start monitor mode\n");
+               else
+                       ath10k_dbg(ATH10K_DBG_MAC, "Monitor mode started\n");
+       } else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) &&
+                  ar->monitor_enabled) {
+               ret = ath10k_monitor_stop(ar);
+               if (ret)
+                       ath10k_warn("Unable to stop monitor mode\n");
+               else
+                       ath10k_dbg(ATH10K_DBG_MAC, "Monitor mode stopped\n");
+       }
+
+       mutex_unlock(&ar->conf_mutex);
+}
+
+static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
+                                   struct ieee80211_vif *vif,
+                                   struct ieee80211_bss_conf *info,
+                                   u32 changed)
+{
+       struct ath10k *ar = hw->priv;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       int ret = 0;
+
+       mutex_lock(&ar->conf_mutex);
+
+       if (changed & BSS_CHANGED_IBSS)
+               ath10k_control_ibss(arvif, info, vif->addr);
+
+       if (changed & BSS_CHANGED_BEACON_INT) {
+               arvif->beacon_interval = info->beacon_int;
+               ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+                                               WMI_VDEV_PARAM_BEACON_INTERVAL,
+                                               arvif->beacon_interval);
+               if (ret)
+                       ath10k_warn("Failed to set beacon interval for VDEV: %d\n",
+                                   arvif->vdev_id);
+               else
+                       ath10k_dbg(ATH10K_DBG_MAC,
+                                  "Beacon interval: %d set for VDEV: %d\n",
+                                  arvif->beacon_interval, arvif->vdev_id);
+       }
+
+       if (changed & BSS_CHANGED_BEACON) {
+               ret = ath10k_wmi_pdev_set_param(ar,
+                                               WMI_PDEV_PARAM_BEACON_TX_MODE,
+                                               WMI_BEACON_STAGGERED_MODE);
+               if (ret)
+                       ath10k_warn("Failed to set beacon mode for VDEV: %d\n",
+                                   arvif->vdev_id);
+               else
+                       ath10k_dbg(ATH10K_DBG_MAC,
+                                  "Set staggered beacon mode for VDEV: %d\n",
+                                  arvif->vdev_id);
+       }
+
+       if (changed & BSS_CHANGED_BEACON_INFO) {
+               arvif->dtim_period = info->dtim_period;
+
+               ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+                                               WMI_VDEV_PARAM_DTIM_PERIOD,
+                                               arvif->dtim_period);
+               if (ret)
+                       ath10k_warn("Failed to set dtim period for VDEV: %d\n",
+                                   arvif->vdev_id);
+               else
+                       ath10k_dbg(ATH10K_DBG_MAC,
+                                  "Set dtim period: %d for VDEV: %d\n",
+                                  arvif->dtim_period, arvif->vdev_id);
+       }
+
+       if (changed & BSS_CHANGED_SSID &&
+           vif->type == NL80211_IFTYPE_AP) {
+               arvif->u.ap.ssid_len = info->ssid_len;
+               if (info->ssid_len)
+                       memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len);
+               arvif->u.ap.hidden_ssid = info->hidden_ssid;
+       }
+
+       if (changed & BSS_CHANGED_BSSID) {
+               if (!is_zero_ether_addr(info->bssid)) {
+                       ret = ath10k_peer_create(ar, arvif->vdev_id,
+                                                info->bssid);
+                       if (ret)
+                               ath10k_warn("Failed to add peer: %pM for VDEV: %d\n",
+                                           info->bssid, arvif->vdev_id);
+                       else
+                               ath10k_dbg(ATH10K_DBG_MAC,
+                                          "Added peer: %pM for VDEV: %d\n",
+                                          info->bssid, arvif->vdev_id);
+
+                       if (vif->type == NL80211_IFTYPE_STATION) {
+                               /*
+                                * this is never erased as we use it for crypto
+                                * key clearing; this is a FW requirement
+                                */
+                               memcpy(arvif->u.sta.bssid, info->bssid,
+                                      ETH_ALEN);
+
+                               ret = ath10k_vdev_start(arvif);
+                               if (!ret)
+                                       ath10k_dbg(ATH10K_DBG_MAC,
+                                                  "VDEV: %d started with BSSID: %pM\n",
+                                                  arvif->vdev_id, info->bssid);
+                       }
+
+                       /*
+                        * mac80211 does not keep the IBSS bssid when leaving
+                        * IBSS, so the driver needs to store it. It is needed
+                        * when leaving IBSS in order to remove the BSSID peer.
+                        */
+                       if (vif->type == NL80211_IFTYPE_ADHOC)
+                               memcpy(arvif->u.ibss.bssid, info->bssid,
+                                      ETH_ALEN);
+               }
+       }
+
+       if (changed & BSS_CHANGED_BEACON_ENABLED)
+               ath10k_control_beaconing(arvif, info);
+
+       if (changed & BSS_CHANGED_ERP_CTS_PROT) {
+               u32 cts_prot;
+               if (info->use_cts_prot)
+                       cts_prot = 1;
+               else
+                       cts_prot = 0;
+
+               ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+                                               WMI_VDEV_PARAM_ENABLE_RTSCTS,
+                                               cts_prot);
+               if (ret)
+                       ath10k_warn("Failed to set CTS prot for VDEV: %d\n",
+                                   arvif->vdev_id);
+               else
+                       ath10k_dbg(ATH10K_DBG_MAC,
+                                  "Set CTS prot: %d for VDEV: %d\n",
+                                  cts_prot, arvif->vdev_id);
+       }
+
+       if (changed & BSS_CHANGED_ERP_SLOT) {
+               u32 slottime;
+               if (info->use_short_slot)
+                       slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */
+               else
+                       slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
+
+               ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+                                               WMI_VDEV_PARAM_SLOT_TIME,
+                                               slottime);
+               if (ret)
+                       ath10k_warn("Failed to set erp slot for VDEV: %d\n",
+                                   arvif->vdev_id);
+               else
+                       ath10k_dbg(ATH10K_DBG_MAC,
+                                  "Set slottime: %d for VDEV: %d\n",
+                                  slottime, arvif->vdev_id);
+       }
+
+       if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+               u32 preamble;
+               if (info->use_short_preamble)
+                       preamble = WMI_VDEV_PREAMBLE_SHORT;
+               else
+                       preamble = WMI_VDEV_PREAMBLE_LONG;
+
+               ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+                                               WMI_VDEV_PARAM_PREAMBLE,
+                                               preamble);
+               if (ret)
+                       ath10k_warn("Failed to set preamble for VDEV: %d\n",
+                                   arvif->vdev_id);
+               else
+                       ath10k_dbg(ATH10K_DBG_MAC,
+                                  "Set preamble: %d for VDEV: %d\n",
+                                  preamble, arvif->vdev_id);
+       }
+
+       if (changed & BSS_CHANGED_ASSOC) {
+               if (info->assoc)
+                       ath10k_bss_assoc(hw, vif, info);
+       }
+
+       mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath10k_hw_scan(struct ieee80211_hw *hw,
+                         struct ieee80211_vif *vif,
+                         struct cfg80211_scan_request *req)
+{
+       struct ath10k *ar = hw->priv;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct wmi_start_scan_arg arg;
+       int ret = 0;
+       int i;
+
+       mutex_lock(&ar->conf_mutex);
+
+       spin_lock_bh(&ar->data_lock);
+       if (ar->scan.in_progress) {
+               spin_unlock_bh(&ar->data_lock);
+               ret = -EBUSY;
+               goto exit;
+       }
+
+       INIT_COMPLETION(ar->scan.started);
+       INIT_COMPLETION(ar->scan.completed);
+       ar->scan.in_progress = true;
+       ar->scan.aborting = false;
+       ar->scan.is_roc = false;
+       ar->scan.vdev_id = arvif->vdev_id;
+       spin_unlock_bh(&ar->data_lock);
+
+       memset(&arg, 0, sizeof(arg));
+       ath10k_wmi_start_scan_init(ar, &arg);
+       arg.vdev_id = arvif->vdev_id;
+       arg.scan_id = ATH10K_SCAN_ID;
+
+       if (!req->no_cck)
+               arg.scan_ctrl_flags |= WMI_SCAN_ADD_CCK_RATES;
+
+       if (req->ie_len) {
+               arg.ie_len = req->ie_len;
+               memcpy(arg.ie, req->ie, arg.ie_len);
+       }
+
+       if (req->n_ssids) {
+               arg.n_ssids = req->n_ssids;
+               for (i = 0; i < arg.n_ssids; i++) {
+                       arg.ssids[i].len  = req->ssids[i].ssid_len;
+                       arg.ssids[i].ssid = req->ssids[i].ssid;
+               }
+       }
+
+       if (req->n_channels) {
+               arg.n_channels = req->n_channels;
+               for (i = 0; i < arg.n_channels; i++)
+                       arg.channels[i] = req->channels[i]->center_freq;
+       }
+
+       ret = ath10k_start_scan(ar, &arg);
+       if (ret) {
+               ath10k_warn("could not start hw scan (%d)\n", ret);
+               spin_lock_bh(&ar->data_lock);
+               ar->scan.in_progress = false;
+               spin_unlock_bh(&ar->data_lock);
+       }
+
+exit:
+       mutex_unlock(&ar->conf_mutex);
+       return ret;
+}
+
+static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw,
+                                 struct ieee80211_vif *vif)
+{
+       struct ath10k *ar = hw->priv;
+       int ret;
+
+       mutex_lock(&ar->conf_mutex);
+       ret = ath10k_abort_scan(ar);
+       if (ret) {
+               ath10k_warn("couldn't abort scan (%d). forcefully sending scan completion to mac80211\n",
+                           ret);
+               ieee80211_scan_completed(hw, 1 /* aborted */);
+       }
+       mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+                         struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+                         struct ieee80211_key_conf *key)
+{
+       struct ath10k *ar = hw->priv;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_peer *peer;
+       const u8 *peer_addr;
+       bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+                     key->cipher == WLAN_CIPHER_SUITE_WEP104;
+       int ret = 0;
+
+       if (key->keyidx > WMI_MAX_KEY_INDEX)
+               return -ENOSPC;
+
+       mutex_lock(&ar->conf_mutex);
+
+       if (sta)
+               peer_addr = sta->addr;
+       else if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
+               peer_addr = vif->bss_conf.bssid;
+       else
+               peer_addr = vif->addr;
+
+       key->hw_key_idx = key->keyidx;
+
+       /* the peer should not disappear midway (unless the FW goes awry) since
+        * we already hold conf_mutex; we just make sure it's there now. */
+       spin_lock_bh(&ar->data_lock);
+       peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
+       spin_unlock_bh(&ar->data_lock);
+
+       if (!peer) {
+               if (cmd == SET_KEY) {
+                       ath10k_warn("cannot install key for non-existent peer %pM\n",
+                                   peer_addr);
+                       ret = -EOPNOTSUPP;
+                       goto exit;
+               } else {
+                       /* if the peer doesn't exist there is no key to disable
+                        * anymore */
+                       goto exit;
+               }
+       }
+
+       if (is_wep) {
+               if (cmd == SET_KEY)
+                       arvif->wep_keys[key->keyidx] = key;
+               else
+                       arvif->wep_keys[key->keyidx] = NULL;
+
+               if (cmd == DISABLE_KEY)
+                       ath10k_clear_vdev_key(arvif, key);
+       }
+
+       ret = ath10k_install_key(arvif, key, cmd, peer_addr);
+       if (ret) {
+               ath10k_warn("ath10k_install_key failed (%d)\n", ret);
+               goto exit;
+       }
+
+       spin_lock_bh(&ar->data_lock);
+       peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
+       if (peer && cmd == SET_KEY)
+               peer->keys[key->keyidx] = key;
+       else if (peer && cmd == DISABLE_KEY)
+               peer->keys[key->keyidx] = NULL;
+       else if (peer == NULL)
+               /* impossible unless FW goes crazy */
+               ath10k_warn("peer %pM disappeared!\n", peer_addr);
+       spin_unlock_bh(&ar->data_lock);
+
+exit:
+       mutex_unlock(&ar->conf_mutex);
+       return ret;
+}
+
+static int ath10k_sta_state(struct ieee80211_hw *hw,
+                           struct ieee80211_vif *vif,
+                           struct ieee80211_sta *sta,
+                           enum ieee80211_sta_state old_state,
+                           enum ieee80211_sta_state new_state)
+{
+       struct ath10k *ar = hw->priv;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       int ret = 0;
+
+       mutex_lock(&ar->conf_mutex);
+
+       if (old_state == IEEE80211_STA_NOTEXIST &&
+           new_state == IEEE80211_STA_NONE &&
+           vif->type != NL80211_IFTYPE_STATION) {
+               /*
+                * New station addition.
+                */
+               ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);
+               if (ret)
+                       ath10k_warn("Failed to add peer: %pM for VDEV: %d\n",
+                                   sta->addr, arvif->vdev_id);
+               else
+                       ath10k_dbg(ATH10K_DBG_MAC,
+                                  "Added peer: %pM for VDEV: %d\n",
+                                  sta->addr, arvif->vdev_id);
+       } else if ((old_state == IEEE80211_STA_NONE &&
+                   new_state == IEEE80211_STA_NOTEXIST)) {
+               /*
+                * Existing station deletion.
+                */
+               ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
+               if (ret)
+                       ath10k_warn("Failed to delete peer: %pM for VDEV: %d\n",
+                                   sta->addr, arvif->vdev_id);
+               else
+                       ath10k_dbg(ATH10K_DBG_MAC,
+                                  "Removed peer: %pM for VDEV: %d\n",
+                                  sta->addr, arvif->vdev_id);
+
+               if (vif->type == NL80211_IFTYPE_STATION)
+                       ath10k_bss_disassoc(hw, vif);
+       } else if (old_state == IEEE80211_STA_AUTH &&
+                  new_state == IEEE80211_STA_ASSOC &&
+                  (vif->type == NL80211_IFTYPE_AP ||
+                   vif->type == NL80211_IFTYPE_ADHOC)) {
+               /*
+                * New association.
+                */
+               ret = ath10k_station_assoc(ar, arvif, sta);
+               if (ret)
+                       ath10k_warn("Failed to associate station: %pM\n",
+                                   sta->addr);
+               else
+                       ath10k_dbg(ATH10K_DBG_MAC,
+                                  "Station %pM moved to assoc state\n",
+                                  sta->addr);
+       } else if (old_state == IEEE80211_STA_ASSOC &&
+                  new_state == IEEE80211_STA_AUTH &&
+                  (vif->type == NL80211_IFTYPE_AP ||
+                   vif->type == NL80211_IFTYPE_ADHOC)) {
+               /*
+                * Disassociation.
+                */
+               ret = ath10k_station_disassoc(ar, arvif, sta);
+               if (ret)
+                       ath10k_warn("Failed to disassociate station: %pM\n",
+                                   sta->addr);
+               else
+                       ath10k_dbg(ATH10K_DBG_MAC,
+                                  "Station %pM moved to disassociated state\n",
+                                  sta->addr);
+       }
+
+       mutex_unlock(&ar->conf_mutex);
+       return ret;
+}
+
+static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
+                                u16 ac, bool enable)
+{
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       u32 value = 0;
+       int ret = 0;
+
+       if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+               return 0;
+
+       switch (ac) {
+       case IEEE80211_AC_VO:
+               value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN |
+                       WMI_STA_PS_UAPSD_AC3_TRIGGER_EN;
+               break;
+       case IEEE80211_AC_VI:
+               value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN |
+                       WMI_STA_PS_UAPSD_AC2_TRIGGER_EN;
+               break;
+       case IEEE80211_AC_BE:
+               value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN |
+                       WMI_STA_PS_UAPSD_AC1_TRIGGER_EN;
+               break;
+       case IEEE80211_AC_BK:
+               value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN |
+                       WMI_STA_PS_UAPSD_AC0_TRIGGER_EN;
+               break;
+       }
+
+       if (enable)
+               arvif->u.sta.uapsd |= value;
+       else
+               arvif->u.sta.uapsd &= ~value;
+
+       ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+                                         WMI_STA_PS_PARAM_UAPSD,
+                                         arvif->u.sta.uapsd);
+       if (ret) {
+               ath10k_warn("could not set uapsd params %d\n", ret);
+               goto exit;
+       }
+
+       if (arvif->u.sta.uapsd)
+               value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD;
+       else
+               value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
+
+       ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+                                         WMI_STA_PS_PARAM_RX_WAKE_POLICY,
+                                         value);
+       if (ret)
+               ath10k_warn("could not set rx wake param %d\n", ret);
+
+exit:
+       return ret;
+}
+
+static int ath10k_conf_tx(struct ieee80211_hw *hw,
+                         struct ieee80211_vif *vif, u16 ac,
+                         const struct ieee80211_tx_queue_params *params)
+{
+       struct ath10k *ar = hw->priv;
+       struct wmi_wmm_params_arg *p = NULL;
+       int ret;
+
+       mutex_lock(&ar->conf_mutex);
+
+       switch (ac) {
+       case IEEE80211_AC_VO:
+               p = &ar->wmm_params.ac_vo;
+               break;
+       case IEEE80211_AC_VI:
+               p = &ar->wmm_params.ac_vi;
+               break;
+       case IEEE80211_AC_BE:
+               p = &ar->wmm_params.ac_be;
+               break;
+       case IEEE80211_AC_BK:
+               p = &ar->wmm_params.ac_bk;
+               break;
+       }
+
+       if (WARN_ON(!p)) {
+               ret = -EINVAL;
+               goto exit;
+       }
+
+       p->cwmin = params->cw_min;
+       p->cwmax = params->cw_max;
+       p->aifs = params->aifs;
+
+       /*
+        * The channel time duration programmed in the HW is in absolute
+        * microseconds, while mac80211 gives the txop in units of
+        * 32 microseconds.
+        */
+       p->txop = params->txop * 32;
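+       /* e.g. a mac80211 txop of 94 becomes 94 * 32 = 3008 us in the HW */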
+
+       /* FIXME: FW accepts wmm params per hw, not per vif */
+       ret = ath10k_wmi_pdev_set_wmm_params(ar, &ar->wmm_params);
+       if (ret) {
+               ath10k_warn("could not set wmm params %d\n", ret);
+               goto exit;
+       }
+
+       ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
+       if (ret)
+               ath10k_warn("could not set sta uapsd %d\n", ret);
+
+exit:
+       mutex_unlock(&ar->conf_mutex);
+       return ret;
+}
+
+#define ATH10K_ROC_TIMEOUT_HZ (2*HZ)
+
+static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
+                                   struct ieee80211_vif *vif,
+                                   struct ieee80211_channel *chan,
+                                   int duration,
+                                   enum ieee80211_roc_type type)
+{
+       struct ath10k *ar = hw->priv;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct wmi_start_scan_arg arg;
+       int ret;
+
+       mutex_lock(&ar->conf_mutex);
+
+       spin_lock_bh(&ar->data_lock);
+       if (ar->scan.in_progress) {
+               spin_unlock_bh(&ar->data_lock);
+               ret = -EBUSY;
+               goto exit;
+       }
+
+       INIT_COMPLETION(ar->scan.started);
+       INIT_COMPLETION(ar->scan.completed);
+       INIT_COMPLETION(ar->scan.on_channel);
+       ar->scan.in_progress = true;
+       ar->scan.aborting = false;
+       ar->scan.is_roc = true;
+       ar->scan.vdev_id = arvif->vdev_id;
+       ar->scan.roc_freq = chan->center_freq;
+       spin_unlock_bh(&ar->data_lock);
+
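+       /* Remain-on-channel is implemented as a single-channel passive scan
+        * with probe request filtering enabled.
+        */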
+       memset(&arg, 0, sizeof(arg));
+       ath10k_wmi_start_scan_init(ar, &arg);
+       arg.vdev_id = arvif->vdev_id;
+       arg.scan_id = ATH10K_SCAN_ID;
+       arg.n_channels = 1;
+       arg.channels[0] = chan->center_freq;
+       arg.dwell_time_active = duration;
+       arg.dwell_time_passive = duration;
+       arg.max_scan_time = 2 * duration;
+       arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
+       arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
+
+       ret = ath10k_start_scan(ar, &arg);
+       if (ret) {
+               ath10k_warn("could not start roc scan (%d)\n", ret);
+               spin_lock_bh(&ar->data_lock);
+               ar->scan.in_progress = false;
+               spin_unlock_bh(&ar->data_lock);
+               goto exit;
+       }
+
+       ret = wait_for_completion_timeout(&ar->scan.on_channel, 3*HZ);
+       if (ret == 0) {
+               ath10k_warn("could not switch to channel for roc scan\n");
+               ath10k_abort_scan(ar);
+               ret = -ETIMEDOUT;
+               goto exit;
+       }
+
+       ret = 0;
+exit:
+       mutex_unlock(&ar->conf_mutex);
+       return ret;
+}
+
+static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw)
+{
+       struct ath10k *ar = hw->priv;
+
+       mutex_lock(&ar->conf_mutex);
+       ath10k_abort_scan(ar);
+       mutex_unlock(&ar->conf_mutex);
+
+       return 0;
+}
+
+/*
+ * Both RTS and Fragmentation threshold are interface-specific
+ * in ath10k, but device-specific in mac80211.
+ */
+static void ath10k_set_rts_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+       struct ath10k_generic_iter *ar_iter = data;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       u32 rts = ar_iter->ar->hw->wiphy->rts_threshold;
+
+       rts = min_t(u32, rts, ATH10K_RTS_MAX);
+
+       ar_iter->ret = ath10k_wmi_vdev_set_param(ar_iter->ar, arvif->vdev_id,
+                                                WMI_VDEV_PARAM_RTS_THRESHOLD,
+                                                rts);
+       if (ar_iter->ret)
+               ath10k_warn("Failed to set RTS threshold for VDEV: %d\n",
+                           arvif->vdev_id);
+       else
+               ath10k_dbg(ATH10K_DBG_MAC,
+                          "Set RTS threshold: %d for VDEV: %d\n",
+                          rts, arvif->vdev_id);
+}
+
+static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+       struct ath10k_generic_iter ar_iter;
+       struct ath10k *ar = hw->priv;
+
+       memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
+       ar_iter.ar = ar;
+
+       mutex_lock(&ar->conf_mutex);
+       ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+                                           ath10k_set_rts_iter, &ar_iter);
+       mutex_unlock(&ar->conf_mutex);
+
+       return ar_iter.ret;
+}
+
+static void ath10k_set_frag_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+       struct ath10k_generic_iter *ar_iter = data;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       u32 frag = ar_iter->ar->hw->wiphy->frag_threshold;
+       int ret;
+
+       frag = clamp_t(u32, frag,
+                      ATH10K_FRAGMT_THRESHOLD_MIN,
+                      ATH10K_FRAGMT_THRESHOLD_MAX);
+
+       ret = ath10k_wmi_vdev_set_param(ar_iter->ar, arvif->vdev_id,
+                                       WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+                                       frag);
+
+       ar_iter->ret = ret;
+       if (ar_iter->ret)
+               ath10k_warn("Failed to set frag threshold for VDEV: %d\n",
+                           arvif->vdev_id);
+       else
+               ath10k_dbg(ATH10K_DBG_MAC,
+                          "Set frag threshold: %d for VDEV: %d\n",
+                          frag, arvif->vdev_id);
+}
+
+static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
+{
+       struct ath10k_generic_iter ar_iter;
+       struct ath10k *ar = hw->priv;
+
+       memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
+       ar_iter.ar = ar;
+
+       mutex_lock(&ar->conf_mutex);
+       ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+                                           ath10k_set_frag_iter, &ar_iter);
+       mutex_unlock(&ar->conf_mutex);
+
+       return ar_iter.ret;
+}
+
+static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+{
+       struct ath10k *ar = hw->priv;
+       int ret;
+
+       /* mac80211 doesn't care if we really xmit queued frames or not;
+        * we'll collect those frames either way if we stop/delete vdevs */
+       if (drop)
+               return;
+
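+       /* Wait for the FW to return all outstanding MSDU ids (i.e. the
+        * used_msdu_ids bitmap becomes empty) or give up after
+        * ATH10K_FLUSH_TIMEOUT_HZ.
+        */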
+       ret = wait_event_timeout(ar->htt->empty_tx_wq, ({
+                       bool empty;
+                       spin_lock_bh(&ar->htt->tx_lock);
+                       empty = bitmap_empty(ar->htt->used_msdu_ids,
+                                            ar->htt->max_num_pending_tx);
+                       spin_unlock_bh(&ar->htt->tx_lock);
+                       (empty);
+               }), ATH10K_FLUSH_TIMEOUT_HZ);
+       if (ret <= 0)
+               ath10k_warn("tx not flushed\n");
+}
+
+/* TODO: Implement this function properly.
+ * For now it is needed to reply to Probe Requests in IBSS mode.
+ * Probably we need this information from the FW.
+ */
+static int ath10k_tx_last_beacon(struct ieee80211_hw *hw)
+{
+       return 1;
+}
+
+static const struct ieee80211_ops ath10k_ops = {
+       .tx                             = ath10k_tx,
+       .start                          = ath10k_start,
+       .stop                           = ath10k_stop,
+       .config                         = ath10k_config,
+       .add_interface                  = ath10k_add_interface,
+       .remove_interface               = ath10k_remove_interface,
+       .configure_filter               = ath10k_configure_filter,
+       .bss_info_changed               = ath10k_bss_info_changed,
+       .hw_scan                        = ath10k_hw_scan,
+       .cancel_hw_scan                 = ath10k_cancel_hw_scan,
+       .set_key                        = ath10k_set_key,
+       .sta_state                      = ath10k_sta_state,
+       .conf_tx                        = ath10k_conf_tx,
+       .remain_on_channel              = ath10k_remain_on_channel,
+       .cancel_remain_on_channel       = ath10k_cancel_remain_on_channel,
+       .set_rts_threshold              = ath10k_set_rts_threshold,
+       .set_frag_threshold             = ath10k_set_frag_threshold,
+       .flush                          = ath10k_flush,
+       .tx_last_beacon                 = ath10k_tx_last_beacon,
+};
+
+#define RATETAB_ENT(_rate, _rateid, _flags) { \
+       .bitrate                = (_rate), \
+       .flags                  = (_flags), \
+       .hw_value               = (_rateid), \
+}
+
+#define CHAN2G(_channel, _freq, _flags) { \
+       .band                   = IEEE80211_BAND_2GHZ, \
+       .hw_value               = (_channel), \
+       .center_freq            = (_freq), \
+       .flags                  = (_flags), \
+       .max_antenna_gain       = 0, \
+       .max_power              = 30, \
+}
+
+#define CHAN5G(_channel, _freq, _flags) { \
+       .band                   = IEEE80211_BAND_5GHZ, \
+       .hw_value               = (_channel), \
+       .center_freq            = (_freq), \
+       .flags                  = (_flags), \
+       .max_antenna_gain       = 0, \
+       .max_power              = 30, \
+}
+
+static const struct ieee80211_channel ath10k_2ghz_channels[] = {
+       CHAN2G(1, 2412, 0),
+       CHAN2G(2, 2417, 0),
+       CHAN2G(3, 2422, 0),
+       CHAN2G(4, 2427, 0),
+       CHAN2G(5, 2432, 0),
+       CHAN2G(6, 2437, 0),
+       CHAN2G(7, 2442, 0),
+       CHAN2G(8, 2447, 0),
+       CHAN2G(9, 2452, 0),
+       CHAN2G(10, 2457, 0),
+       CHAN2G(11, 2462, 0),
+       CHAN2G(12, 2467, 0),
+       CHAN2G(13, 2472, 0),
+       CHAN2G(14, 2484, 0),
+};
+
+static const struct ieee80211_channel ath10k_5ghz_channels[] = {
+       CHAN5G(36, 5180, 14),
+       CHAN5G(40, 5200, 15),
+       CHAN5G(44, 5220, 16),
+       CHAN5G(48, 5240, 17),
+       CHAN5G(52, 5260, 18),
+       CHAN5G(56, 5280, 19),
+       CHAN5G(60, 5300, 20),
+       CHAN5G(64, 5320, 21),
+       CHAN5G(100, 5500, 22),
+       CHAN5G(104, 5520, 23),
+       CHAN5G(108, 5540, 24),
+       CHAN5G(112, 5560, 25),
+       CHAN5G(116, 5580, 26),
+       CHAN5G(120, 5600, 27),
+       CHAN5G(124, 5620, 28),
+       CHAN5G(128, 5640, 29),
+       CHAN5G(132, 5660, 30),
+       CHAN5G(136, 5680, 31),
+       CHAN5G(140, 5700, 32),
+       CHAN5G(149, 5745, 33),
+       CHAN5G(153, 5765, 34),
+       CHAN5G(157, 5785, 35),
+       CHAN5G(161, 5805, 36),
+       CHAN5G(165, 5825, 37),
+};
+
+static struct ieee80211_rate ath10k_rates[] = {
+       /* CCK */
+       RATETAB_ENT(10,  0x82, 0),
+       RATETAB_ENT(20,  0x84, 0),
+       RATETAB_ENT(55,  0x8b, 0),
+       RATETAB_ENT(110, 0x96, 0),
+       /* OFDM */
+       RATETAB_ENT(60,  0x0c, 0),
+       RATETAB_ENT(90,  0x12, 0),
+       RATETAB_ENT(120, 0x18, 0),
+       RATETAB_ENT(180, 0x24, 0),
+       RATETAB_ENT(240, 0x30, 0),
+       RATETAB_ENT(360, 0x48, 0),
+       RATETAB_ENT(480, 0x60, 0),
+       RATETAB_ENT(540, 0x6c, 0),
+};
+
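+/* 5 GHz (a) rates skip the four CCK entries; 2.4 GHz (g) uses the full table */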
+#define ath10k_a_rates (ath10k_rates + 4)
+#define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - 4)
+#define ath10k_g_rates (ath10k_rates + 0)
+#define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
+
+struct ath10k *ath10k_mac_create(void)
+{
+       struct ieee80211_hw *hw;
+       struct ath10k *ar;
+
+       hw = ieee80211_alloc_hw(sizeof(struct ath10k), &ath10k_ops);
+       if (!hw)
+               return NULL;
+
+       ar = hw->priv;
+       ar->hw = hw;
+
+       return ar;
+}
+
+void ath10k_mac_destroy(struct ath10k *ar)
+{
+       ieee80211_free_hw(ar->hw);
+}
+
+static const struct ieee80211_iface_limit ath10k_if_limits[] = {
+       {
+       .max    = 8,
+       .types  = BIT(NL80211_IFTYPE_STATION)
+               | BIT(NL80211_IFTYPE_P2P_CLIENT)
+               | BIT(NL80211_IFTYPE_P2P_GO)
+               | BIT(NL80211_IFTYPE_AP)
+       }
+};
+
+static const struct ieee80211_iface_combination ath10k_if_comb = {
+       .limits = ath10k_if_limits,
+       .n_limits = ARRAY_SIZE(ath10k_if_limits),
+       .max_interfaces = 8,
+       .num_different_channels = 1,
+       .beacon_int_infra_match = true,
+};
+
+static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
+{
+       struct ieee80211_sta_vht_cap vht_cap = {0};
+       u16 mcs_map;
+
+       vht_cap.vht_supported = 1;
+       vht_cap.cap = ar->vht_cap_info;
+
+       /* FIXME: check dynamically how many streams board supports */
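+       /* 2 bits per spatial stream: streams 1-3 advertise MCS 0-9, the
+        * remaining streams are marked as not supported.
+        */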
+       mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
+               IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
+               IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 |
+               IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
+               IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
+               IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
+               IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
+               IEEE80211_VHT_MCS_NOT_SUPPORTED << 14;
+
+       vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
+       vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
+
+       return vht_cap;
+}
+
+static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar)
+{
+       int i;
+       struct ieee80211_sta_ht_cap ht_cap = {0};
+
+       if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED))
+               return ht_cap;
+
+       ht_cap.ht_supported = 1;
+       ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+       ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
+       ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+       ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
+       ht_cap.cap |= WLAN_HT_CAP_SM_PS_STATIC << IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+       if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI)
+               ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
+
+       if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI)
+               ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+
+       if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) {
+               u32 smps;
+
+               smps   = WLAN_HT_CAP_SM_PS_DYNAMIC;
+               smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+               ht_cap.cap |= smps;
+       }
+
+       if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC)
+               ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
+
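+       /* Translate the FW RX STBC stream count into the corresponding
+        * mac80211 IEEE80211_HT_CAP_RX_STBC field.
+        */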
+       if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) {
+               u32 stbc;
+
+               stbc   = ar->ht_cap_info;
+               stbc  &= WMI_HT_CAP_RX_STBC;
+               stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT;
+               stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT;
+               stbc  &= IEEE80211_HT_CAP_RX_STBC;
+
+               ht_cap.cap |= stbc;
+       }
+
+       if (ar->ht_cap_info & WMI_HT_CAP_LDPC)
+               ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
+       if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT)
+               ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT;
+
+       /* max AMSDU is implicitly taken from vht_cap_info */
+       if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
+               ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+
+       for (i = 0; i < WMI_MAX_SPATIAL_STREAM; i++)
+               ht_cap.mcs.rx_mask[i] = 0xFF;
+
+       ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
+
+       return ht_cap;
+}
+
+static void ath10k_get_arvif_iter(void *data, u8 *mac,
+                                 struct ieee80211_vif *vif)
+{
+       struct ath10k_vif_iter *arvif_iter = data;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+       if (arvif->vdev_id == arvif_iter->vdev_id)
+               arvif_iter->arvif = arvif;
+}
+
+struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id)
+{
+       struct ath10k_vif_iter arvif_iter;
+       u32 flags;
+
+       memset(&arvif_iter, 0, sizeof(struct ath10k_vif_iter));
+       arvif_iter.vdev_id = vdev_id;
+
+       flags = IEEE80211_IFACE_ITER_RESUME_ALL;
+       ieee80211_iterate_active_interfaces_atomic(ar->hw,
+                                                  flags,
+                                                  ath10k_get_arvif_iter,
+                                                  &arvif_iter);
+       if (!arvif_iter.arvif) {
+               ath10k_warn("No VIF found for VDEV: %d\n", vdev_id);
+               return NULL;
+       }
+
+       return arvif_iter.arvif;
+}
+
+int ath10k_mac_register(struct ath10k *ar)
+{
+       struct ieee80211_supported_band *band;
+       struct ieee80211_sta_vht_cap vht_cap;
+       struct ieee80211_sta_ht_cap ht_cap;
+       void *channels;
+       int ret;
+
+       SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
+
+       SET_IEEE80211_DEV(ar->hw, ar->dev);
+
+       ht_cap = ath10k_get_ht_cap(ar);
+       vht_cap = ath10k_create_vht_cap(ar);
+
+       if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
+               channels = kmemdup(ath10k_2ghz_channels,
+                                  sizeof(ath10k_2ghz_channels),
+                                  GFP_KERNEL);
+               if (!channels)
+                       return -ENOMEM;
+
+               band = &ar->mac.sbands[IEEE80211_BAND_2GHZ];
+               band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
+               band->channels = channels;
+               band->n_bitrates = ath10k_g_rates_size;
+               band->bitrates = ath10k_g_rates;
+               band->ht_cap = ht_cap;
+
+               /* vht is not supported in 2.4 GHz */
+
+               ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band;
+       }
+
+       if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
+               channels = kmemdup(ath10k_5ghz_channels,
+                                  sizeof(ath10k_5ghz_channels),
+                                  GFP_KERNEL);
+               if (!channels) {
+                       if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
+                               band = &ar->mac.sbands[IEEE80211_BAND_2GHZ];
+                               kfree(band->channels);
+                       }
+                       return -ENOMEM;
+               }
+
+               band = &ar->mac.sbands[IEEE80211_BAND_5GHZ];
+               band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels);
+               band->channels = channels;
+               band->n_bitrates = ath10k_a_rates_size;
+               band->bitrates = ath10k_a_rates;
+               band->ht_cap = ht_cap;
+               band->vht_cap = vht_cap;
+               ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = band;
+       }
+
+       ar->hw->wiphy->interface_modes =
+               BIT(NL80211_IFTYPE_STATION) |
+               BIT(NL80211_IFTYPE_ADHOC) |
+               BIT(NL80211_IFTYPE_AP) |
+               BIT(NL80211_IFTYPE_P2P_CLIENT) |
+               BIT(NL80211_IFTYPE_P2P_GO);
+
+       ar->hw->flags = IEEE80211_HW_SIGNAL_DBM |
+                       IEEE80211_HW_SUPPORTS_PS |
+                       IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
+                       IEEE80211_HW_SUPPORTS_UAPSD |
+                       IEEE80211_HW_MFP_CAPABLE |
+                       IEEE80211_HW_REPORTS_TX_ACK_STATUS |
+                       IEEE80211_HW_HAS_RATE_CONTROL |
+                       IEEE80211_HW_SUPPORTS_STATIC_SMPS |
+                       IEEE80211_HW_WANT_MONITOR_VIF |
+                       IEEE80211_HW_AP_LINK_PS;
+
+       if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
+               ar->hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS;
+
+       if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) {
+               ar->hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
+               ar->hw->flags |= IEEE80211_HW_TX_AMPDU_SETUP_IN_HW;
+       }
+
+       ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
+       ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
+
+       ar->hw->vif_data_size = sizeof(struct ath10k_vif);
+
+       ar->hw->channel_change_time = 5000;
+       ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
+
+       ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+       ar->hw->wiphy->max_remain_on_channel_duration = 5000;
+
+       ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+       /*
+        * On LL hardware the queues are managed entirely by the FW,
+        * so we only advertise the standard four AC queues to mac80211.
+        */
+       ar->hw->queues = 4;
+
+       ar->hw->wiphy->iface_combinations = &ath10k_if_comb;
+       ar->hw->wiphy->n_iface_combinations = 1;
+
+       ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
+                           ath10k_reg_notifier);
+       if (ret) {
+               ath10k_err("Regulatory initialization failed\n");
+               return ret;
+       }
+
+       ret = ieee80211_register_hw(ar->hw);
+       if (ret) {
+               ath10k_err("ieee80211 registration failed: %d\n", ret);
+               return ret;
+       }
+
+       if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
+               ret = regulatory_hint(ar->hw->wiphy,
+                                     ar->ath_common.regulatory.alpha2);
+               if (ret)
+                       goto exit;
+       }
+
+       return 0;
+exit:
+       ieee80211_unregister_hw(ar->hw);
+       return ret;
+}
+
+void ath10k_mac_unregister(struct ath10k *ar)
+{
+       ieee80211_unregister_hw(ar->hw);
+
+       kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels);
+       kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels);
+
+       SET_IEEE80211_DEV(ar->hw, NULL);
+}
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
new file mode 100644 (file)
index 0000000..27fc92e
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _MAC_H_
+#define _MAC_H_
+
+#include <net/mac80211.h>
+#include "core.h"
+
+struct ath10k_generic_iter {
+       struct ath10k *ar;
+       int ret;
+};
+
+struct ath10k *ath10k_mac_create(void);
+void ath10k_mac_destroy(struct ath10k *ar);
+int ath10k_mac_register(struct ath10k *ar);
+void ath10k_mac_unregister(struct ath10k *ar);
+struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id);
+void ath10k_reset_scan(unsigned long ptr);
+void ath10k_offchan_tx_purge(struct ath10k *ar);
+void ath10k_offchan_tx_work(struct work_struct *work);
+
+static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
+{
+       return (struct ath10k_vif *)vif->drv_priv;
+}
+
+static inline void ath10k_tx_h_seq_no(struct sk_buff *skb)
+{
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct ieee80211_vif *vif = info->control.vif;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
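+       /* The sequence number occupies the top 12 bits of seq_ctrl, so
+        * stepping tx_seq_no by 0x10 advances the sequence number by one.
+        */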
+       if (info->flags  & IEEE80211_TX_CTL_ASSIGN_SEQ) {
+               if (arvif->tx_seq_no == 0)
+                       arvif->tx_seq_no = 0x1000;
+
+               if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
+                       arvif->tx_seq_no += 0x10;
+               hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+               hdr->seq_ctrl |= cpu_to_le16(arvif->tx_seq_no);
+       }
+}
+
+#endif /* _MAC_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
new file mode 100644 (file)
index 0000000..c8e9056
--- /dev/null
@@ -0,0 +1,2506 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+
+#include "core.h"
+#include "debug.h"
+
+#include "targaddrs.h"
+#include "bmi.h"
+
+#include "hif.h"
+#include "htc.h"
+
+#include "ce.h"
+#include "pci.h"
+
+unsigned int ath10k_target_ps;
+module_param(ath10k_target_ps, uint, 0644);
+MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
+
+#define QCA988X_1_0_DEVICE_ID  (0xabcd)
+#define QCA988X_2_0_DEVICE_ID  (0x003c)
+
+static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
+       { PCI_VDEVICE(ATHEROS, QCA988X_1_0_DEVICE_ID) }, /* PCI-E QCA988X V1 */
+       { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
+       {0}
+};
+
+static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
+                                      u32 *data);
+
+static void ath10k_pci_process_ce(struct ath10k *ar);
+static int ath10k_pci_post_rx(struct ath10k *ar);
+static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
+                                            int num);
+static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info);
+static void ath10k_pci_stop_ce(struct ath10k *ar);
+
+static const struct ce_attr host_ce_config_wlan[] = {
+       /* host->target HTC control and raw streams */
+       { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL,},
+       /* could be moved to share CE3 */
+       /* target->host HTT + HTC control */
+       { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL,},
+       /* target->host WMI */
+       { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,},
+       /* host->target WMI */
+       { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,},
+       /* host->target HTT */
+       { /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, 0,
+                   CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,},
+       /* unused */
+       { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
+       /* Target autonomous hif_memcpy */
+       { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
+       /* ce_diag, the Diagnostic Window */
+       { /* CE7 */ CE_ATTR_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,},
+};
+
+/* Target firmware's Copy Engine configuration. */
+static const struct ce_pipe_config target_ce_config_wlan[] = {
+       /* host->target HTC control and raw streams */
+       { /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0,},
+       /* target->host HTT + HTC control */
+       { /* CE1 */ 1, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0,},
+       /* target->host WMI */
+       { /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
+       /* host->target WMI */
+       { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
+       /* host->target HTT */
+       { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0,},
+       /* NB: 50% of src nentries, since tx has 2 frags */
+       /* unused */
+       { /* CE5 */ 5, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
+       /* Reserved for target autonomous hif_memcpy */
+       { /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0,},
+       /* CE7 used only by Host */
+};
+
+/*
+ * Diagnostic read/write access is provided for startup/config/debug usage.
+ * Caller must guarantee proper alignment, when applicable, and single user
+ * at any moment.
+ */
+static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
+                                   int nbytes)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int ret = 0;
+       u32 buf;
+       unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
+       unsigned int id;
+       unsigned int flags;
+       struct ce_state *ce_diag;
+       /* Host buffer address in CE space */
+       u32 ce_data;
+       dma_addr_t ce_data_base = 0;
+       void *data_buf = NULL;
+       int i;
+
+       /*
+        * This code cannot handle reads to non-memory space. Redirect to the
+        * register read fn but preserve the multi-word read capability of
+        * this fn.
+        */
+       if (address < DRAM_BASE_ADDRESS) {
+               if (!IS_ALIGNED(address, 4) ||
+                   !IS_ALIGNED((unsigned long)data, 4))
+                       return -EIO;
+
+               while ((nbytes >= 4) &&  ((ret = ath10k_pci_diag_read_access(
+                                          ar, address, (u32 *)data)) == 0)) {
+                       nbytes -= sizeof(u32);
+                       address += sizeof(u32);
+                       data += sizeof(u32);
+               }
+               return ret;
+       }
+
+       ce_diag = ar_pci->ce_diag;
+
+       /*
+        * Allocate a temporary bounce buffer to hold caller's data
+        * to be DMA'ed from Target. This guarantees
+        *   1) 4-byte alignment
+        *   2) Buffer in DMA-able space
+        */
+       orig_nbytes = nbytes;
+       data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
+                                                        orig_nbytes,
+                                                        &ce_data_base);
+
+       if (!data_buf) {
+               ret = -ENOMEM;
+               goto done;
+       }
+       memset(data_buf, 0, orig_nbytes);
+
+       remaining_bytes = orig_nbytes;
+       ce_data = ce_data_base;
+       while (remaining_bytes) {
+               nbytes = min_t(unsigned int, remaining_bytes,
+                              DIAG_TRANSFER_LIMIT);
+
+               ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
+               if (ret != 0)
+                       goto done;
+
+               /* Request CE to send from Target(!) address to Host buffer */
+               /*
+                * The address supplied by the caller is in the
+                * Target CPU virtual address space.
+                *
+                * In order to use this address with the diagnostic CE,
+                * convert it from Target CPU virtual address space
+                * to CE address space
+                */
+               ath10k_pci_wake(ar);
+               address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
+                                                    address);
+               ath10k_pci_sleep(ar);
+
+               ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
+                                0);
+               if (ret)
+                       goto done;
+
+               i = 0;
+               while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
+                                                    &completed_nbytes,
+                                                    &id) != 0) {
+                       mdelay(1);
+                       if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+                               ret = -EBUSY;
+                               goto done;
+                       }
+               }
+
+               if (nbytes != completed_nbytes) {
+                       ret = -EIO;
+                       goto done;
+               }
+
+               if (buf != (u32) address) {
+                       ret = -EIO;
+                       goto done;
+               }
+
+               i = 0;
+               while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
+                                                    &completed_nbytes,
+                                                    &id, &flags) != 0) {
+                       mdelay(1);
+
+                       if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+                               ret = -EBUSY;
+                               goto done;
+                       }
+               }
+
+               if (nbytes != completed_nbytes) {
+                       ret = -EIO;
+                       goto done;
+               }
+
+               if (buf != ce_data) {
+                       ret = -EIO;
+                       goto done;
+               }
+
+               remaining_bytes -= nbytes;
+               address += nbytes;
+               ce_data += nbytes;
+       }
+
+done:
+       if (ret == 0) {
+               /* Copy data from allocated DMA buf to caller's buf */
+               WARN_ON_ONCE(orig_nbytes & 3);
+               for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
+                       ((u32 *)data)[i] =
+                               __le32_to_cpu(((__le32 *)data_buf)[i]);
+               }
+       } else {
+               ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
+                          __func__, address);
+       }
+
+       if (data_buf)
+               pci_free_consistent(ar_pci->pdev, orig_nbytes,
+                                   data_buf, ce_data_base);
+
+       return ret;
+}
+
+/* Read 4-byte aligned data from Target memory or register */
+static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
+                                      u32 *data)
+{
+       /* Assume range doesn't cross this boundary */
+       if (address >= DRAM_BASE_ADDRESS)
+               return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));
+
+       ath10k_pci_wake(ar);
+       *data = ath10k_pci_read32(ar, address);
+       ath10k_pci_sleep(ar);
+       return 0;
+}
+
+static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
+                                    const void *data, int nbytes)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int ret = 0;
+       u32 buf;
+       unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
+       unsigned int id;
+       unsigned int flags;
+       struct ce_state *ce_diag;
+       void *data_buf = NULL;
+       u32 ce_data;    /* Host buffer address in CE space */
+       dma_addr_t ce_data_base = 0;
+       int i;
+
+       ce_diag = ar_pci->ce_diag;
+
+       /*
+        * Allocate a temporary bounce buffer to hold caller's data
+        * to be DMA'ed to Target. This guarantees
+        *   1) 4-byte alignment
+        *   2) Buffer in DMA-able space
+        */
+       orig_nbytes = nbytes;
+       data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
+                                                        orig_nbytes,
+                                                        &ce_data_base);
+       if (!data_buf) {
+               ret = -ENOMEM;
+               goto done;
+       }
+
+       /* Copy caller's data to allocated DMA buf */
+       WARN_ON_ONCE(orig_nbytes & 3);
+       for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
+               ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);
+
+       /*
+        * The address supplied by the caller is in the
+        * Target CPU virtual address space.
+        *
+        * In order to use this address with the diagnostic CE,
+        * convert it from
+        *    Target CPU virtual address space
+        * to
+        *    CE address space
+        */
+       ath10k_pci_wake(ar);
+       address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
+       ath10k_pci_sleep(ar);
+
+       remaining_bytes = orig_nbytes;
+       ce_data = ce_data_base;
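+       /*
+        * Transfer in chunks of at most DIAG_TRANSFER_LIMIT bytes: post the
+        * target address as the recv destination, send from the host bounce
+        * buffer, then poll both send and recv completions.
+        */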
+       while (remaining_bytes) {
+               /* FIXME: check cast */
+               nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
+
+               /* Set up to receive directly into Target(!) address */
+               ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
+               if (ret != 0)
+                       goto done;
+
+               /*
+                * Request CE to send caller-supplied data that
+                * was copied to bounce buffer to Target(!) address.
+                */
+               ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
+                                    nbytes, 0, 0);
+               if (ret != 0)
+                       goto done;
+
+               i = 0;
+               while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
+                                                    &completed_nbytes,
+                                                    &id) != 0) {
+                       mdelay(1);
+
+                       if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+                               ret = -EBUSY;
+                               goto done;
+                       }
+               }
+
+               if (nbytes != completed_nbytes) {
+                       ret = -EIO;
+                       goto done;
+               }
+
+               if (buf != ce_data) {
+                       ret = -EIO;
+                       goto done;
+               }
+
+               i = 0;
+               while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
+                                                    &completed_nbytes,
+                                                    &id, &flags) != 0) {
+                       mdelay(1);
+
+                       if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+                               ret = -EBUSY;
+                               goto done;
+                       }
+               }
+
+               if (nbytes != completed_nbytes) {
+                       ret = -EIO;
+                       goto done;
+               }
+
+               if (buf != address) {
+                       ret = -EIO;
+                       goto done;
+               }
+
+               remaining_bytes -= nbytes;
+               address += nbytes;
+               ce_data += nbytes;
+       }
+
+done:
+       if (data_buf) {
+               pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
+                                   ce_data_base);
+       }
+
+       if (ret != 0)
+               ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
+                          address);
+
+       return ret;
+}
+
+/* Write 4-byte aligned data to Target memory or register */
+static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
+                                       u32 data)
+{
+       /* Assume range doesn't cross this boundary */
+       if (address >= DRAM_BASE_ADDRESS)
+               return ath10k_pci_diag_write_mem(ar, address, &data,
+                                                sizeof(u32));
+
+       ath10k_pci_wake(ar);
+       ath10k_pci_write32(ar, address, data);
+       ath10k_pci_sleep(ar);
+       return 0;
+}
+
+static bool ath10k_pci_target_is_awake(struct ath10k *ar)
+{
+       void __iomem *mem = ath10k_pci_priv(ar)->mem;
+       u32 val;
+       val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
+                      RTC_STATE_ADDRESS);
+       return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
+}
+
+static void ath10k_pci_wait(struct ath10k *ar)
+{
+       int n = 100;
+
+       while (n-- && !ath10k_pci_target_is_awake(ar))
+               msleep(10);
+
+       if (n < 0)
+               ath10k_warn("Unable to wake up target\n");
+}
+
+void ath10k_do_pci_wake(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       void __iomem *pci_addr = ar_pci->mem;
+       int tot_delay = 0;
+       int curr_delay = 5;
+
+       if (atomic_read(&ar_pci->keep_awake_count) == 0) {
+               /* Force AWAKE */
+               iowrite32(PCIE_SOC_WAKE_V_MASK,
+                         pci_addr + PCIE_LOCAL_BASE_ADDRESS +
+                         PCIE_SOC_WAKE_ADDRESS);
+       }
+       atomic_inc(&ar_pci->keep_awake_count);
+
+       if (ar_pci->verified_awake)
+               return;
+
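+       /*
+        * Poll the target's RTC state, backing off from 5us to 50us per
+        * iteration, until it reports awake or PCIE_WAKE_TIMEOUT expires.
+        */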
+       for (;;) {
+               if (ath10k_pci_target_is_awake(ar)) {
+                       ar_pci->verified_awake = true;
+                       break;
+               }
+
+               if (tot_delay > PCIE_WAKE_TIMEOUT) {
+                       ath10k_warn("target takes too long to wake up (awake count %d)\n",
+                                   atomic_read(&ar_pci->keep_awake_count));
+                       break;
+               }
+
+               udelay(curr_delay);
+               tot_delay += curr_delay;
+
+               if (curr_delay < 50)
+                       curr_delay += 5;
+       }
+}
+
+void ath10k_do_pci_sleep(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       void __iomem *pci_addr = ar_pci->mem;
+
+       if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
+               /* Allow sleep */
+               ar_pci->verified_awake = false;
+               iowrite32(PCIE_SOC_WAKE_RESET,
+                         pci_addr + PCIE_LOCAL_BASE_ADDRESS +
+                         PCIE_SOC_WAKE_ADDRESS);
+       }
+}
+
+/*
+ * FIXME: Handle OOM properly.
+ */
+static inline
+struct ath10k_pci_compl *get_free_compl(struct hif_ce_pipe_info *pipe_info)
+{
+       struct ath10k_pci_compl *compl = NULL;
+
+       spin_lock_bh(&pipe_info->pipe_lock);
+       if (list_empty(&pipe_info->compl_free)) {
+               ath10k_warn("Completion buffers are full\n");
+               goto exit;
+       }
+       compl = list_first_entry(&pipe_info->compl_free,
+                                struct ath10k_pci_compl, list);
+       list_del(&compl->list);
+exit:
+       spin_unlock_bh(&pipe_info->pipe_lock);
+       return compl;
+}
+
+/* Called by lower (CE) layer when a send to Target completes. */
+static void ath10k_pci_ce_send_done(struct ce_state *ce_state,
+                                   void *transfer_context,
+                                   u32 ce_data,
+                                   unsigned int nbytes,
+                                   unsigned int transfer_id)
+{
+       struct ath10k *ar = ce_state->ar;
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct hif_ce_pipe_info *pipe_info =  &ar_pci->pipe_info[ce_state->id];
+       struct ath10k_pci_compl *compl;
+       bool process = false;
+
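+       /* Drain all available send completions on this CE, queueing each for
+        * later processing. */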
+       do {
+               /*
+                * For the send completion of an item in sendlist, just
+                * increment num_sends_allowed. The upper layer callback will
+                * be triggered when last fragment is done with send.
+                */
+               if (transfer_context == CE_SENDLIST_ITEM_CTXT) {
+                       spin_lock_bh(&pipe_info->pipe_lock);
+                       pipe_info->num_sends_allowed++;
+                       spin_unlock_bh(&pipe_info->pipe_lock);
+                       continue;
+               }
+
+               compl = get_free_compl(pipe_info);
+               if (!compl)
+                       break;
+
+               compl->send_or_recv = HIF_CE_COMPLETE_SEND;
+               compl->ce_state = ce_state;
+               compl->pipe_info = pipe_info;
+               compl->transfer_context = transfer_context;
+               compl->nbytes = nbytes;
+               compl->transfer_id = transfer_id;
+               compl->flags = 0;
+
+               /*
+                * Add the completion to the processing queue.
+                */
+               spin_lock_bh(&ar_pci->compl_lock);
+               list_add_tail(&compl->list, &ar_pci->compl_process);
+               spin_unlock_bh(&ar_pci->compl_lock);
+
+               process = true;
+       } while (ath10k_ce_completed_send_next(ce_state,
+                                                          &transfer_context,
+                                                          &ce_data, &nbytes,
+                                                          &transfer_id) == 0);
+
+       /*
+        * If only some of the items within a sendlist have completed,
+        * don't invoke completion processing until the entire sendlist
+        * has been sent.
+        */
+       if (!process)
+               return;
+
+       ath10k_pci_process_ce(ar);
+}
+
+/* Called by lower (CE) layer when data is received from the Target. */
+static void ath10k_pci_ce_recv_data(struct ce_state *ce_state,
+                                   void *transfer_context, u32 ce_data,
+                                   unsigned int nbytes,
+                                   unsigned int transfer_id,
+                                   unsigned int flags)
+{
+       struct ath10k *ar = ce_state->ar;
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct hif_ce_pipe_info *pipe_info =  &ar_pci->pipe_info[ce_state->id];
+       struct ath10k_pci_compl *compl;
+       struct sk_buff *skb;
+
+       do {
+               compl = get_free_compl(pipe_info);
+               if (!compl)
+                       break;
+
+               compl->send_or_recv = HIF_CE_COMPLETE_RECV;
+               compl->ce_state = ce_state;
+               compl->pipe_info = pipe_info;
+               compl->transfer_context = transfer_context;
+               compl->nbytes = nbytes;
+               compl->transfer_id = transfer_id;
+               compl->flags = flags;
+
+               skb = transfer_context;
+               dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
+                                skb->len + skb_tailroom(skb),
+                                DMA_FROM_DEVICE);
+               /*
+                * Add the completion to the processing queue.
+                */
+               spin_lock_bh(&ar_pci->compl_lock);
+               list_add_tail(&compl->list, &ar_pci->compl_process);
+               spin_unlock_bh(&ar_pci->compl_lock);
+
+       } while (ath10k_ce_completed_recv_next(ce_state,
+                                                          &transfer_context,
+                                                          &ce_data, &nbytes,
+                                                          &transfer_id,
+                                                          &flags) == 0);
+
+       ath10k_pci_process_ce(ar);
+}
+
+/* Send the first nbytes bytes of the buffer */
+static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
+                                   unsigned int transfer_id,
+                                   unsigned int bytes, struct sk_buff *nbuf)
+{
+       struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe_id]);
+       struct ce_state *ce_hdl = pipe_info->ce_hdl;
+       struct ce_sendlist sendlist;
+       unsigned int len;
+       u32 flags = 0;
+       int ret;
+
+       memset(&sendlist, 0, sizeof(struct ce_sendlist));
+
+       len = min(bytes, nbuf->len);
+       bytes -= len;
+
+       if (len & 3)
+               ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);
+
+       ath10k_dbg(ATH10K_DBG_PCI,
+                  "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
+                  nbuf->data, (unsigned long long) skb_cb->paddr,
+                  nbuf->len, len);
+       ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
+                       "ath10k tx: data: ",
+                       nbuf->data, nbuf->len);
+
+       ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags);
+
+       /* Make sure we have resources to handle this request */
+       spin_lock_bh(&pipe_info->pipe_lock);
+       if (!pipe_info->num_sends_allowed) {
+               ath10k_warn("Pipe: %d is full\n", pipe_id);
+               spin_unlock_bh(&pipe_info->pipe_lock);
+               return -ENOSR;
+       }
+       pipe_info->num_sends_allowed--;
+       spin_unlock_bh(&pipe_info->pipe_lock);
+
+       ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
+       if (ret)
+               ath10k_warn("CE send failed: %p\n", nbuf);
+
+       return ret;
+}
+
+static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe]);
+       int ret;
+
+       spin_lock_bh(&pipe_info->pipe_lock);
+       ret = pipe_info->num_sends_allowed;
+       spin_unlock_bh(&pipe_info->pipe_lock);
+
+       return ret;
+}
+
+static void ath10k_pci_hif_dump_area(struct ath10k *ar)
+{
+       u32 reg_dump_area = 0;
+       u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
+       u32 host_addr;
+       int ret;
+       u32 i;
+
+       ath10k_err("firmware crashed!\n");
+       ath10k_err("hardware name %s version 0x%x\n",
+                  ar->hw_params.name, ar->target_version);
+       ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major,
+                  ar->fw_version_minor, ar->fw_version_release,
+                  ar->fw_version_build);
+
+       host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
+       if (ath10k_pci_diag_read_mem(ar, host_addr,
+                                    &reg_dump_area, sizeof(u32)) != 0) {
+               ath10k_warn("could not read hi_failure_state\n");
+               return;
+       }
+
+       ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);
+
+       ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
+                                      &reg_dump_values[0],
+                                      REG_DUMP_COUNT_QCA988X * sizeof(u32));
+       if (ret != 0) {
+               ath10k_err("could not dump FW Dump Area\n");
+               return;
+       }
+
+       BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
+
+       ath10k_err("target Register Dump\n");
+       for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
+               ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
+                          i,
+                          reg_dump_values[i],
+                          reg_dump_values[i + 1],
+                          reg_dump_values[i + 2],
+                          reg_dump_values[i + 3]);
+}
+
+static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
+                                              int force)
+{
+       if (!force) {
+               int resources;
+               /*
+                * Decide whether to actually poll for completions, or just
+                * wait for a later chance.
+                * If there seem to be plenty of resources left, then just wait
+                * since checking involves reading a CE register, which is a
+                * relatively expensive operation.
+                */
+               resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
+
+               /*
+                * If at least 50% of the total resources are still available,
+                * don't bother checking again yet.
+                */
+               if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
+                       return;
+       }
+       ath10k_ce_per_engine_service(ar, pipe);
+}
+
+static void ath10k_pci_hif_post_init(struct ath10k *ar,
+                                    struct ath10k_hif_cb *callbacks)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+       ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+
+       memcpy(&ar_pci->msg_callbacks_current, callbacks,
+              sizeof(ar_pci->msg_callbacks_current));
+}
+
+static int ath10k_pci_start_ce(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct ce_state *ce_diag = ar_pci->ce_diag;
+       const struct ce_attr *attr;
+       struct hif_ce_pipe_info *pipe_info;
+       struct ath10k_pci_compl *compl;
+       int i, pipe_num, completions, disable_interrupts;
+
+       spin_lock_init(&ar_pci->compl_lock);
+       INIT_LIST_HEAD(&ar_pci->compl_process);
+
+       for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+               pipe_info = &ar_pci->pipe_info[pipe_num];
+
+               spin_lock_init(&pipe_info->pipe_lock);
+               INIT_LIST_HEAD(&pipe_info->compl_free);
+
+               /* Handle Diagnostic CE specially */
+               if (pipe_info->ce_hdl == ce_diag)
+                       continue;
+
+               attr = &host_ce_config_wlan[pipe_num];
+               completions = 0;
+
+               if (attr->src_nentries) {
+                       disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
+                       ath10k_ce_send_cb_register(pipe_info->ce_hdl,
+                                                  ath10k_pci_ce_send_done,
+                                                  disable_interrupts);
+                       completions += attr->src_nentries;
+                       pipe_info->num_sends_allowed = attr->src_nentries - 1;
+               }
+
+               if (attr->dest_nentries) {
+                       ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
+                                                  ath10k_pci_ce_recv_data);
+                       completions += attr->dest_nentries;
+               }
+
+               if (completions == 0)
+                       continue;
+
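+               /* Preallocate one completion struct per CE ring entry. */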
+               for (i = 0; i < completions; i++) {
+                       compl = kmalloc(sizeof(struct ath10k_pci_compl),
+                                       GFP_KERNEL);
+                       if (!compl) {
+                               ath10k_warn("No memory for completion state\n");
+                               ath10k_pci_stop_ce(ar);
+                               return -ENOMEM;
+                       }
+
+                       compl->send_or_recv = HIF_CE_COMPLETE_FREE;
+                       list_add_tail(&compl->list, &pipe_info->compl_free);
+               }
+       }
+
+       return 0;
+}
+
+static void ath10k_pci_stop_ce(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct ath10k_pci_compl *compl;
+       struct sk_buff *skb;
+       int i;
+
+       ath10k_ce_disable_interrupts(ar);
+
+       /* Cancel the pending tasklet */
+       tasklet_kill(&ar_pci->intr_tq);
+
+       for (i = 0; i < CE_COUNT; i++)
+               tasklet_kill(&ar_pci->pipe_info[i].intr);
+
+       /* Mark pending completions as aborted, so that upper layers free up
+        * their associated resources */
+       spin_lock_bh(&ar_pci->compl_lock);
+       list_for_each_entry(compl, &ar_pci->compl_process, list) {
+               skb = (struct sk_buff *)compl->transfer_context;
+               ATH10K_SKB_CB(skb)->is_aborted = true;
+       }
+       spin_unlock_bh(&ar_pci->compl_lock);
+}
+
+static void ath10k_pci_cleanup_ce(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct ath10k_pci_compl *compl, *tmp;
+       struct hif_ce_pipe_info *pipe_info;
+       struct sk_buff *netbuf;
+       int pipe_num;
+
+       /* Free pending completions. */
+       spin_lock_bh(&ar_pci->compl_lock);
+       if (!list_empty(&ar_pci->compl_process))
+               ath10k_warn("pending completions still present! possible memory leaks.\n");
+
+       list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
+               list_del(&compl->list);
+               netbuf = (struct sk_buff *)compl->transfer_context;
+               dev_kfree_skb_any(netbuf);
+               kfree(compl);
+       }
+       spin_unlock_bh(&ar_pci->compl_lock);
+
+       /* Free unused completions for each pipe. */
+       for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+               pipe_info = &ar_pci->pipe_info[pipe_num];
+
+               spin_lock_bh(&pipe_info->pipe_lock);
+               list_for_each_entry_safe(compl, tmp,
+                                        &pipe_info->compl_free, list) {
+                       list_del(&compl->list);
+                       kfree(compl);
+               }
+               spin_unlock_bh(&pipe_info->pipe_lock);
+       }
+}
+
+static void ath10k_pci_process_ce(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ar->hif.priv;
+       struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
+       struct ath10k_pci_compl *compl;
+       struct sk_buff *skb;
+       unsigned int nbytes;
+       int ret, send_done = 0;
+
+       /* Upper layers aren't ready to handle tx/rx completions in parallel so
+        * we must serialize all completion processing. */
+
+       spin_lock_bh(&ar_pci->compl_lock);
+       if (ar_pci->compl_processing) {
+               spin_unlock_bh(&ar_pci->compl_lock);
+               return;
+       }
+       ar_pci->compl_processing = true;
+       spin_unlock_bh(&ar_pci->compl_lock);
+
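+       /* Drain the processing queue, one completion at a time. */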
+       for (;;) {
+               spin_lock_bh(&ar_pci->compl_lock);
+               if (list_empty(&ar_pci->compl_process)) {
+                       spin_unlock_bh(&ar_pci->compl_lock);
+                       break;
+               }
+               compl = list_first_entry(&ar_pci->compl_process,
+                                        struct ath10k_pci_compl, list);
+               list_del(&compl->list);
+               spin_unlock_bh(&ar_pci->compl_lock);
+
+               if (compl->send_or_recv == HIF_CE_COMPLETE_SEND) {
+                       cb->tx_completion(ar,
+                                         compl->transfer_context,
+                                         compl->transfer_id);
+                       send_done = 1;
+               } else {
+                       ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
+                       if (ret) {
+                               ath10k_warn("Unable to post recv buffer for pipe: %d\n",
+                                           compl->pipe_info->pipe_num);
+                               break;
+                       }
+
+                       skb = (struct sk_buff *)compl->transfer_context;
+                       nbytes = compl->nbytes;
+
+                       ath10k_dbg(ATH10K_DBG_PCI,
+                                  "ath10k_pci_ce_recv_data netbuf=%p  nbytes=%d\n",
+                                  skb, nbytes);
+                       ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
+                                       "ath10k rx: ", skb->data, nbytes);
+
+                       if (skb->len + skb_tailroom(skb) >= nbytes) {
+                               skb_trim(skb, 0);
+                               skb_put(skb, nbytes);
+                               cb->rx_completion(ar, skb,
+                                                 compl->pipe_info->pipe_num);
+                       } else {
+                               ath10k_warn("rxed more than expected (nbytes %d, max %d)\n",
+                                           nbytes,
+                                           skb->len + skb_tailroom(skb));
+                       }
+               }
+
+               compl->send_or_recv = HIF_CE_COMPLETE_FREE;
+
+               /*
+                * Add completion back to the pipe's free list.
+                */
+               spin_lock_bh(&compl->pipe_info->pipe_lock);
+               list_add_tail(&compl->list, &compl->pipe_info->compl_free);
+               compl->pipe_info->num_sends_allowed += send_done;
+               spin_unlock_bh(&compl->pipe_info->pipe_lock);
+       }
+
+       spin_lock_bh(&ar_pci->compl_lock);
+       ar_pci->compl_processing = false;
+       spin_unlock_bh(&ar_pci->compl_lock);
+}
+
+/* TODO - temporary mapping while we have too few CE's */
+static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
+                                             u16 service_id, u8 *ul_pipe,
+                                             u8 *dl_pipe, int *ul_is_polled,
+                                             int *dl_is_polled)
+{
+       int ret = 0;
+
+       /* polling for received messages not supported */
+       *dl_is_polled = 0;
+
+       switch (service_id) {
+       case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
+               /*
+                * Host->target HTT gets its own pipe, so it can be polled
+                * while other pipes are interrupt driven.
+                */
+               *ul_pipe = 4;
+               /*
+                * Use the same target->host pipe for HTC ctrl, HTC raw
+                * streams, and HTT.
+                */
+               *dl_pipe = 1;
+               break;
+
+       case ATH10K_HTC_SVC_ID_RSVD_CTRL:
+       case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
+               /*
+                * Note: HTC_RAW_STREAMS_SVC is currently unused, and
+                * HTC_CTRL_RSVD_SVC could share the same pipe as the
+                * WMI services.  So, if another CE is needed, change
+                * this to *ul_pipe = 3, which frees up CE 0.
+                */
+               /* *ul_pipe = 3; */
+               *ul_pipe = 0;
+               *dl_pipe = 1;
+               break;
+
+       case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
+       case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
+       case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
+       case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
+
+       case ATH10K_HTC_SVC_ID_WMI_CONTROL:
+               *ul_pipe = 3;
+               *dl_pipe = 2;
+               break;
+
+               /* pipe 5 unused   */
+               /* pipe 6 reserved */
+               /* pipe 7 reserved */
+
+       default:
+               ret = -1;
+               break;
+       }
+       *ul_is_polled =
+               (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;
+
+       return ret;
+}
+
+static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
+                                               u8 *ul_pipe, u8 *dl_pipe)
+{
+       int ul_is_polled, dl_is_polled;
+
+       (void)ath10k_pci_hif_map_service_to_pipe(ar,
+                                                ATH10K_HTC_SVC_ID_RSVD_CTRL,
+                                                ul_pipe,
+                                                dl_pipe,
+                                                &ul_is_polled,
+                                                &dl_is_polled);
+}
+
+static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
+                                  int num)
+{
+       struct ath10k *ar = pipe_info->hif_ce_state;
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct ce_state *ce_state = pipe_info->ce_hdl;
+       struct sk_buff *skb;
+       dma_addr_t ce_data;
+       int i, ret = 0;
+
+       if (pipe_info->buf_sz == 0)
+               return 0;
+
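+       /*
+        * Allocate, DMA-map and enqueue 'num' receive skbs on this pipe's
+        * copy engine; on failure, revoke anything already posted.
+        */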
+       for (i = 0; i < num; i++) {
+               skb = dev_alloc_skb(pipe_info->buf_sz);
+               if (!skb) {
+                       ath10k_warn("could not allocate skbuff for pipe %d\n",
+                                   pipe_info->pipe_num);
+                       ret = -ENOMEM;
+                       goto err;
+               }
+
+               WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
+
+               ce_data = dma_map_single(ar->dev, skb->data,
+                                        skb->len + skb_tailroom(skb),
+                                        DMA_FROM_DEVICE);
+
+               if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
+                       ath10k_warn("could not dma map skbuff\n");
+                       dev_kfree_skb_any(skb);
+                       ret = -EIO;
+                       goto err;
+               }
+
+               ATH10K_SKB_CB(skb)->paddr = ce_data;
+
+               pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
+                                              pipe_info->buf_sz,
+                                              PCI_DMA_FROMDEVICE);
+
+               ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
+                                                ce_data);
+               if (ret) {
+                       ath10k_warn("could not enqueue to pipe %d (%d)\n",
+                                   pipe_info->pipe_num, ret);
+                       goto err;
+               }
+       }
+
+       return ret;
+
+err:
+       ath10k_pci_rx_pipe_cleanup(pipe_info);
+       return ret;
+}
+
+static int ath10k_pci_post_rx(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct hif_ce_pipe_info *pipe_info;
+       const struct ce_attr *attr;
+       int pipe_num, ret = 0;
+
+       for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+               pipe_info = &ar_pci->pipe_info[pipe_num];
+               attr = &host_ce_config_wlan[pipe_num];
+
+               if (attr->dest_nentries == 0)
+                       continue;
+
+               ret = ath10k_pci_post_rx_pipe(pipe_info,
+                                             attr->dest_nentries - 1);
+               if (ret) {
+                       ath10k_warn("Unable to replenish recv buffers for pipe: %d\n",
+                                   pipe_num);
+
+                       for (; pipe_num >= 0; pipe_num--) {
+                               pipe_info = &ar_pci->pipe_info[pipe_num];
+                               ath10k_pci_rx_pipe_cleanup(pipe_info);
+                       }
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+static int ath10k_pci_hif_start(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int ret;
+
+       ret = ath10k_pci_start_ce(ar);
+       if (ret) {
+               ath10k_warn("could not start CE (%d)\n", ret);
+               return ret;
+       }
+
+       /* Post buffers once to start things off. */
+       ret = ath10k_pci_post_rx(ar);
+       if (ret) {
+               ath10k_warn("could not post rx pipes (%d)\n", ret);
+               return ret;
+       }
+
+       ar_pci->started = 1;
+       return 0;
+}
+
+static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
+{
+       struct ath10k *ar;
+       struct ath10k_pci *ar_pci;
+       struct ce_state *ce_hdl;
+       u32 buf_sz;
+       struct sk_buff *netbuf;
+       u32 ce_data;
+
+       buf_sz = pipe_info->buf_sz;
+
+       /* Unused Copy Engine */
+       if (buf_sz == 0)
+               return;
+
+       ar = pipe_info->hif_ce_state;
+       ar_pci = ath10k_pci_priv(ar);
+
+       if (!ar_pci->started)
+               return;
+
+       ce_hdl = pipe_info->ce_hdl;
+
+       while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
+                                         &ce_data) == 0) {
+               dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
+                                netbuf->len + skb_tailroom(netbuf),
+                                DMA_FROM_DEVICE);
+               dev_kfree_skb_any(netbuf);
+       }
+}
+
+static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
+{
+       struct ath10k *ar;
+       struct ath10k_pci *ar_pci;
+       struct ce_state *ce_hdl;
+       struct sk_buff *netbuf;
+       u32 ce_data;
+       unsigned int nbytes;
+       unsigned int id;
+       u32 buf_sz;
+
+       buf_sz = pipe_info->buf_sz;
+
+       /* Unused Copy Engine */
+       if (buf_sz == 0)
+               return;
+
+       ar = pipe_info->hif_ce_state;
+       ar_pci = ath10k_pci_priv(ar);
+
+       if (!ar_pci->started)
+               return;
+
+       ce_hdl = pipe_info->ce_hdl;
+
+       while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
+                                         &ce_data, &nbytes, &id) == 0) {
+               if (netbuf != CE_SENDLIST_ITEM_CTXT) {
+                       /*
+                        * Indicate the completion to higher layer to free
+                        * the buffer.
+                        */
+                       ATH10K_SKB_CB(netbuf)->is_aborted = true;
+                       ar_pci->msg_callbacks_current.tx_completion(ar,
+                                                                   netbuf,
+                                                                   id);
+               }
+       }
+}
+
+/*
+ * Clean up residual buffers for device shutdown:
+ *    buffers that were enqueued for receive
+ *    buffers that were to be sent
+ * Note: Buffers that had completed but which were
+ * not yet processed are on a completion queue. They
+ * are handled when the completion thread shuts down.
+ */
+static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int pipe_num;
+
+       for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+               struct hif_ce_pipe_info *pipe_info;
+
+               pipe_info = &ar_pci->pipe_info[pipe_num];
+               ath10k_pci_rx_pipe_cleanup(pipe_info);
+               ath10k_pci_tx_pipe_cleanup(pipe_info);
+       }
+}
+
+static void ath10k_pci_ce_deinit(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct hif_ce_pipe_info *pipe_info;
+       int pipe_num;
+
+       for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+               pipe_info = &ar_pci->pipe_info[pipe_num];
+               if (pipe_info->ce_hdl) {
+                       ath10k_ce_deinit(pipe_info->ce_hdl);
+                       pipe_info->ce_hdl = NULL;
+                       pipe_info->buf_sz = 0;
+               }
+       }
+}
+
+static void ath10k_pci_hif_stop(struct ath10k *ar)
+{
+       ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+
+       ath10k_pci_stop_ce(ar);
+
+       /* At this point, asynchronous threads are stopped, the target should
+        * not DMA nor interrupt. We process the leftovers and then free
+        * everything else up. */
+
+       ath10k_pci_process_ce(ar);
+       ath10k_pci_cleanup_ce(ar);
+       ath10k_pci_buffer_cleanup(ar);
+       ath10k_pci_ce_deinit(ar);
+}
+
+static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
+                                          void *req, u32 req_len,
+                                          void *resp, u32 *resp_len)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct ce_state *ce_tx = ar_pci->pipe_info[BMI_CE_NUM_TO_TARG].ce_hdl;
+       struct ce_state *ce_rx = ar_pci->pipe_info[BMI_CE_NUM_TO_HOST].ce_hdl;
+       dma_addr_t req_paddr = 0;
+       dma_addr_t resp_paddr = 0;
+       struct bmi_xfer xfer = {};
+       void *treq, *tresp = NULL;
+       int ret = 0;
+
+       if (resp && !resp_len)
+               return -EINVAL;
+
+       if (resp && resp_len && *resp_len == 0)
+               return -EINVAL;
+
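+       /*
+        * Copy the request into a DMA-mapped bounce buffer, optionally post a
+        * response buffer on the BMI recv pipe, then send the request and wait
+        * (with a timeout) for the CE completion callbacks to fire.
+        */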
+       treq = kmemdup(req, req_len, GFP_KERNEL);
+       if (!treq)
+               return -ENOMEM;
+
+       req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
+       ret = dma_mapping_error(ar->dev, req_paddr);
+       if (ret)
+               goto err_dma;
+
+       if (resp && resp_len) {
+               tresp = kzalloc(*resp_len, GFP_KERNEL);
+               if (!tresp) {
+                       ret = -ENOMEM;
+                       goto err_req;
+               }
+
+               resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
+                                           DMA_FROM_DEVICE);
+               ret = dma_mapping_error(ar->dev, resp_paddr);
+               if (ret)
+                       goto err_req;
+
+               xfer.wait_for_resp = true;
+               xfer.resp_len = 0;
+
+               ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
+       }
+
+       init_completion(&xfer.done);
+
+       ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
+       if (ret)
+               goto err_resp;
+
+       ret = wait_for_completion_timeout(&xfer.done,
+                                         BMI_COMMUNICATION_TIMEOUT_HZ);
+       if (ret <= 0) {
+               u32 unused_buffer;
+               unsigned int unused_nbytes;
+               unsigned int unused_id;
+
+               ret = -ETIMEDOUT;
+               ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
+                                          &unused_nbytes, &unused_id);
+       } else {
+               /* non-zero means we did not time out */
+               ret = 0;
+       }
+
+err_resp:
+       if (resp) {
+               u32 unused_buffer;
+
+               ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
+               dma_unmap_single(ar->dev, resp_paddr,
+                                *resp_len, DMA_FROM_DEVICE);
+       }
+err_req:
+       dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
+
+       if (ret == 0 && resp_len) {
+               *resp_len = min(*resp_len, xfer.resp_len);
+               memcpy(resp, tresp, *resp_len);
+       }
+err_dma:
+       kfree(treq);
+       kfree(tresp);
+
+       return ret;
+}
+
+static void ath10k_pci_bmi_send_done(struct ce_state *ce_state,
+                                    void *transfer_context,
+                                    u32 data,
+                                    unsigned int nbytes,
+                                    unsigned int transfer_id)
+{
+       struct bmi_xfer *xfer = transfer_context;
+
+       if (xfer->wait_for_resp)
+               return;
+
+       complete(&xfer->done);
+}
+
+static void ath10k_pci_bmi_recv_data(struct ce_state *ce_state,
+                                    void *transfer_context,
+                                    u32 data,
+                                    unsigned int nbytes,
+                                    unsigned int transfer_id,
+                                    unsigned int flags)
+{
+       struct bmi_xfer *xfer = transfer_context;
+
+       if (!xfer->wait_for_resp) {
+               ath10k_warn("unexpected: BMI data received; ignoring\n");
+               return;
+       }
+
+       xfer->resp_len = nbytes;
+       complete(&xfer->done);
+}
+
+/*
+ * Map from service/endpoint to Copy Engine.
+ * This table is derived from the CE_PCI TABLE, above.
+ * It is passed to the Target at startup for use by firmware.
+ */
+static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
+       {
+                ATH10K_HTC_SVC_ID_WMI_DATA_VO,
+                PIPEDIR_OUT,           /* out = UL = host -> target */
+                3,
+       },
+       {
+                ATH10K_HTC_SVC_ID_WMI_DATA_VO,
+                PIPEDIR_IN,            /* in = DL = target -> host */
+                2,
+       },
+       {
+                ATH10K_HTC_SVC_ID_WMI_DATA_BK,
+                PIPEDIR_OUT,           /* out = UL = host -> target */
+                3,
+       },
+       {
+                ATH10K_HTC_SVC_ID_WMI_DATA_BK,
+                PIPEDIR_IN,            /* in = DL = target -> host */
+                2,
+       },
+       {
+                ATH10K_HTC_SVC_ID_WMI_DATA_BE,
+                PIPEDIR_OUT,           /* out = UL = host -> target */
+                3,
+       },
+       {
+                ATH10K_HTC_SVC_ID_WMI_DATA_BE,
+                PIPEDIR_IN,            /* in = DL = target -> host */
+                2,
+       },
+       {
+                ATH10K_HTC_SVC_ID_WMI_DATA_VI,
+                PIPEDIR_OUT,           /* out = UL = host -> target */
+                3,
+       },
+       {
+                ATH10K_HTC_SVC_ID_WMI_DATA_VI,
+                PIPEDIR_IN,            /* in = DL = target -> host */
+                2,
+       },
+       {
+                ATH10K_HTC_SVC_ID_WMI_CONTROL,
+                PIPEDIR_OUT,           /* out = UL = host -> target */
+                3,
+       },
+       {
+                ATH10K_HTC_SVC_ID_WMI_CONTROL,
+                PIPEDIR_IN,            /* in = DL = target -> host */
+                2,
+       },
+       {
+                ATH10K_HTC_SVC_ID_RSVD_CTRL,
+                PIPEDIR_OUT,           /* out = UL = host -> target */
+                0,             /* could be moved to 3 (share with WMI) */
+       },
+       {
+                ATH10K_HTC_SVC_ID_RSVD_CTRL,
+                PIPEDIR_IN,            /* in = DL = target -> host */
+                1,
+       },
+       {
+                ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,    /* not currently used */
+                PIPEDIR_OUT,           /* out = UL = host -> target */
+                0,
+       },
+       {
+                ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,    /* not currently used */
+                PIPEDIR_IN,            /* in = DL = target -> host */
+                1,
+       },
+       {
+                ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
+                PIPEDIR_OUT,           /* out = UL = host -> target */
+                4,
+       },
+       {
+                ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
+                PIPEDIR_IN,            /* in = DL = target -> host */
+                1,
+       },
+
+       /* (Additions here) */
+
+       {                               /* Must be last */
+                0,
+                0,
+                0,
+       },
+};
+
+/*
+ * Send an interrupt to the device to wake up the Target CPU
+ * so it has an opportunity to notice any changed state.
+ */
+static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
+{
+       int ret;
+       u32 core_ctrl;
+
+       ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
+                                             CORE_CTRL_ADDRESS,
+                                         &core_ctrl);
+       if (ret) {
+               ath10k_warn("Unable to read core ctrl\n");
+               return ret;
+       }
+
+       /* A_INUM_FIRMWARE interrupt to Target CPU */
+       core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
+
+       ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
+                                              CORE_CTRL_ADDRESS,
+                                          core_ctrl);
+       if (ret)
+               ath10k_warn("Unable to set interrupt mask\n");
+
+       return ret;
+}
+
+static int ath10k_pci_init_config(struct ath10k *ar)
+{
+       u32 interconnect_targ_addr;
+       u32 pcie_state_targ_addr = 0;
+       u32 pipe_cfg_targ_addr = 0;
+       u32 svc_to_pipe_map = 0;
+       u32 pcie_config_flags = 0;
+       u32 ealloc_value;
+       u32 ealloc_targ_addr;
+       u32 flag2_value;
+       u32 flag2_targ_addr;
+       int ret = 0;
+
+       /* Download to Target the CE Config and the service-to-CE map */
+       interconnect_targ_addr =
+               host_interest_item_address(HI_ITEM(hi_interconnect_state));
+
+       /* Supply Target-side CE configuration */
+       ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
+                                         &pcie_state_targ_addr);
+       if (ret != 0) {
+               ath10k_err("Failed to get pcie state addr: %d\n", ret);
+               return ret;
+       }
+
+       if (pcie_state_targ_addr == 0) {
+               ret = -EIO;
+               ath10k_err("Invalid pcie state addr\n");
+               return ret;
+       }
+
+       ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
+                                         offsetof(struct pcie_state,
+                                                  pipe_cfg_addr),
+                                         &pipe_cfg_targ_addr);
+       if (ret != 0) {
+               ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
+               return ret;
+       }
+
+       if (pipe_cfg_targ_addr == 0) {
+               ret = -EIO;
+               ath10k_err("Invalid pipe cfg addr\n");
+               return ret;
+       }
+
+       ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
+                                target_ce_config_wlan,
+                                sizeof(target_ce_config_wlan));
+
+       if (ret != 0) {
+               ath10k_err("Failed to write pipe cfg: %d\n", ret);
+               return ret;
+       }
+
+       ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
+                                         offsetof(struct pcie_state,
+                                                  svc_to_pipe_map),
+                                         &svc_to_pipe_map);
+       if (ret != 0) {
+               ath10k_err("Failed to get svc/pipe map: %d\n", ret);
+               return ret;
+       }
+
+       if (svc_to_pipe_map == 0) {
+               ret = -EIO;
+               ath10k_err("Invalid svc_to_pipe map\n");
+               return ret;
+       }
+
+       ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
+                                target_service_to_ce_map_wlan,
+                                sizeof(target_service_to_ce_map_wlan));
+       if (ret != 0) {
+               ath10k_err("Failed to write svc/pipe map: %d\n", ret);
+               return ret;
+       }
+
+       ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
+                                         offsetof(struct pcie_state,
+                                                  config_flags),
+                                         &pcie_config_flags);
+       if (ret != 0) {
+               ath10k_err("Failed to get pcie config_flags: %d\n", ret);
+               return ret;
+       }
+
+       pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
+
+       ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
+                                offsetof(struct pcie_state, config_flags),
+                                &pcie_config_flags,
+                                sizeof(pcie_config_flags));
+       if (ret != 0) {
+               ath10k_err("Failed to write pcie config_flags: %d\n", ret);
+               return ret;
+       }
+
+       /* configure early allocation */
+       ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
+
+       ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
+       if (ret != 0) {
+               ath10k_err("Faile to get early alloc val: %d\n", ret);
+               return ret;
+       }
+
+       /* first bank is switched to IRAM */
+       ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
+                        HI_EARLY_ALLOC_MAGIC_MASK);
+       ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
+                        HI_EARLY_ALLOC_IRAM_BANKS_MASK);
+
+       ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
+       if (ret != 0) {
+               ath10k_err("Failed to set early alloc val: %d\n", ret);
+               return ret;
+       }
+
+       /* Tell Target to proceed with initialization */
+       flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
+
+       ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
+       if (ret != 0) {
+               ath10k_err("Failed to get option val: %d\n", ret);
+               return ret;
+       }
+
+       flag2_value |= HI_OPTION_EARLY_CFG_DONE;
+
+       ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
+       if (ret != 0) {
+               ath10k_err("Failed to set option val: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int ath10k_pci_ce_init(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct hif_ce_pipe_info *pipe_info;
+       const struct ce_attr *attr;
+       int pipe_num;
+
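+       /*
+        * Bring up one copy engine per host_ce_config_wlan entry; the last
+        * CE is set aside for diagnostic window accesses.
+        */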
+       for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
+               pipe_info = &ar_pci->pipe_info[pipe_num];
+               pipe_info->pipe_num = pipe_num;
+               pipe_info->hif_ce_state = ar;
+               attr = &host_ce_config_wlan[pipe_num];
+
+               pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
+               if (pipe_info->ce_hdl == NULL) {
+                       ath10k_err("Unable to initialize CE for pipe: %d\n",
+                                  pipe_num);
+
+                       /* It is safe to call it here. It checks if ce_hdl is
+                        * valid for each pipe */
+                       ath10k_pci_ce_deinit(ar);
+                       return -1;
+               }
+
+               if (pipe_num == ar_pci->ce_count - 1) {
+                       /*
+                        * Reserve the last CE for diagnostic window
+                        * support.
+                        */
+                       ar_pci->ce_diag =
+                               ar_pci->pipe_info[ar_pci->ce_count - 1].ce_hdl;
+                       continue;
+               }
+
+               pipe_info->buf_sz = (size_t) (attr->src_sz_max);
+       }
+
+       /*
+        * Initially, establish CE completion handlers for use with BMI.
+        * These are overwritten with generic handlers after we exit BMI phase.
+        */
+       pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
+       ath10k_ce_send_cb_register(pipe_info->ce_hdl,
+                                  ath10k_pci_bmi_send_done, 0);
+
+       pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
+       ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
+                                  ath10k_pci_bmi_recv_data);
+
+       return 0;
+}
+
+static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       u32 fw_indicator_address, fw_indicator;
+
+       ath10k_pci_wake(ar);
+
+       fw_indicator_address = ar_pci->fw_indicator_address;
+       fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);
+
+       if (fw_indicator & FW_IND_EVENT_PENDING) {
+               /* ACK: clear Target-side pending event */
+               ath10k_pci_write32(ar, fw_indicator_address,
+                                  fw_indicator & ~FW_IND_EVENT_PENDING);
+
+               if (ar_pci->started) {
+                       ath10k_pci_hif_dump_area(ar);
+               } else {
+                       /*
+                        * Probable Target failure before we're prepared
+                        * to handle it.  Generally unexpected.
+                        */
+                       ath10k_warn("early firmware event indicated\n");
+               }
+       }
+
+       ath10k_pci_sleep(ar);
+}
+
+static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
+       .send_head              = ath10k_pci_hif_send_head,
+       .exchange_bmi_msg       = ath10k_pci_hif_exchange_bmi_msg,
+       .start                  = ath10k_pci_hif_start,
+       .stop                   = ath10k_pci_hif_stop,
+       .map_service_to_pipe    = ath10k_pci_hif_map_service_to_pipe,
+       .get_default_pipe       = ath10k_pci_hif_get_default_pipe,
+       .send_complete_check    = ath10k_pci_hif_send_complete_check,
+       .init                   = ath10k_pci_hif_post_init,
+       .get_free_queue_number  = ath10k_pci_hif_get_free_queue_number,
+};
+
+static void ath10k_pci_ce_tasklet(unsigned long ptr)
+{
+       struct hif_ce_pipe_info *pipe = (struct hif_ce_pipe_info *)ptr;
+       struct ath10k_pci *ar_pci = pipe->ar_pci;
+
+       ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
+}
+
+static void ath10k_msi_err_tasklet(unsigned long data)
+{
+       struct ath10k *ar = (struct ath10k *)data;
+
+       ath10k_pci_fw_interrupt_handler(ar);
+}
+
+/*
+ * Handler for a per-engine interrupt on a PARTICULAR CE.
+ * This is used in cases where each CE has a private MSI interrupt.
+ */
+static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
+{
+       struct ath10k *ar = arg;
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
+
+       if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
+               ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
+               return IRQ_HANDLED;
+       }
+
+       /*
+        * NOTE: We are able to derive ce_id from irq because we
+        * use a one-to-one mapping for CEs 0..5.
+        * CEs 6 and 7 do not use interrupts at all.
+        *
+        * This mapping must be kept in sync with the mapping
+        * used by firmware.
+        */
+       tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
+{
+       struct ath10k *ar = arg;
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+       tasklet_schedule(&ar_pci->msi_fw_err);
+       return IRQ_HANDLED;
+}
+
+/*
+ * Top-level interrupt handler for all PCI interrupts from a Target.
+ * When a block of MSI interrupts is allocated, this top-level handler
+ * is not used; instead, we directly call the correct sub-handler.
+ */
+static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
+{
+       struct ath10k *ar = arg;
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+       if (ar_pci->num_msi_intrs == 0) {
+               /*
+                * IMPORTANT: the INTR_CLR register has to be written after
+                * INTR_ENABLE is set to 0; otherwise the interrupt cannot
+                * really be cleared.
+                */
+               iowrite32(0, ar_pci->mem +
+                         (SOC_CORE_BASE_ADDRESS |
+                          PCIE_INTR_ENABLE_ADDRESS));
+               iowrite32(PCIE_INTR_FIRMWARE_MASK |
+                         PCIE_INTR_CE_MASK_ALL,
+                         ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
+                                        PCIE_INTR_CLR_ADDRESS));
+               /*
+                * IMPORTANT: this extra read transaction is required to
+                * flush the posted write buffer.
+                */
+               (void) ioread32(ar_pci->mem +
+                               (SOC_CORE_BASE_ADDRESS |
+                                PCIE_INTR_ENABLE_ADDRESS));
+       }
+
+       tasklet_schedule(&ar_pci->intr_tq);
+
+       return IRQ_HANDLED;
+}
+
+static void ath10k_pci_tasklet(unsigned long data)
+{
+       struct ath10k *ar = (struct ath10k *)data;
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+       ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
+       ath10k_ce_per_engine_service_any(ar);
+
+       if (ar_pci->num_msi_intrs == 0) {
+               /* Enable Legacy PCI line interrupts */
+               iowrite32(PCIE_INTR_FIRMWARE_MASK |
+                         PCIE_INTR_CE_MASK_ALL,
+                         ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
+                                        PCIE_INTR_ENABLE_ADDRESS));
+               /*
+                * IMPORTANT: this extra read transaction is required to
+                * flush the posted write buffer
+                */
+               (void) ioread32(ar_pci->mem +
+                               (SOC_CORE_BASE_ADDRESS |
+                                PCIE_INTR_ENABLE_ADDRESS));
+       }
+}
+
+static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int ret;
+       int i;
+
+       ret = pci_enable_msi_block(ar_pci->pdev, num);
+       if (ret)
+               return ret;
+
+       ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
+                         ath10k_pci_msi_fw_handler,
+                         IRQF_SHARED, "ath10k_pci", ar);
+       if (ret) {
+               pci_disable_msi(ar_pci->pdev);
+               return ret;
+       }
+
+       for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
+               ret = request_irq(ar_pci->pdev->irq + i,
+                                 ath10k_pci_per_engine_handler,
+                                 IRQF_SHARED, "ath10k_pci", ar);
+               if (ret) {
+                       ath10k_warn("request_irq(%d) failed %d\n",
+                                   ar_pci->pdev->irq + i, ret);
+
+                       /* free the FW irq and any CE irqs already requested */
+                       free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
+                       for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
+                               free_irq(ar_pci->pdev->irq + i, ar);
+
+                       pci_disable_msi(ar_pci->pdev);
+                       return ret;
+               }
+       }
+
+       ath10k_info("MSI-X interrupt handling (%d intrs)\n", num);
+       return 0;
+}
+
+static int ath10k_pci_start_intr_msi(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int ret;
+
+       ret = pci_enable_msi(ar_pci->pdev);
+       if (ret < 0)
+               return ret;
+
+       ret = request_irq(ar_pci->pdev->irq,
+                         ath10k_pci_interrupt_handler,
+                         IRQF_SHARED, "ath10k_pci", ar);
+       if (ret < 0) {
+               pci_disable_msi(ar_pci->pdev);
+               return ret;
+       }
+
+       ath10k_info("MSI interrupt handling\n");
+       return 0;
+}
+
+static int ath10k_pci_start_intr_legacy(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int ret;
+
+       ret = request_irq(ar_pci->pdev->irq,
+                         ath10k_pci_interrupt_handler,
+                         IRQF_SHARED, "ath10k_pci", ar);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * Make sure to wake the Target before enabling Legacy
+        * Interrupt.
+        */
+       iowrite32(PCIE_SOC_WAKE_V_MASK,
+                 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+                 PCIE_SOC_WAKE_ADDRESS);
+
+       ath10k_pci_wait(ar);
+
+       /*
+        * A potential race occurs here: the CORE_BASE write depends on
+        * the Target correctly decoding the AXI address, but the Host
+        * cannot know when the Target has written its BAR to CORE_CTRL.
+        * The write may be lost if the Target has not yet done so, so
+        * work around the race for now by repeating the write during the
+        * synchronization check below.
+        */
+       iowrite32(PCIE_INTR_FIRMWARE_MASK |
+                 PCIE_INTR_CE_MASK_ALL,
+                 ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
+                                PCIE_INTR_ENABLE_ADDRESS));
+       iowrite32(PCIE_SOC_WAKE_RESET,
+                 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+                 PCIE_SOC_WAKE_ADDRESS);
+
+       ath10k_info("legacy interrupt handling\n");
+       return 0;
+}
+
+static int ath10k_pci_start_intr(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int num = MSI_NUM_REQUEST;
+       int ret;
+       int i;
+
+       tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long) ar);
+       tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
+                    (unsigned long) ar);
+
+       for (i = 0; i < CE_COUNT; i++) {
+               ar_pci->pipe_info[i].ar_pci = ar_pci;
+               tasklet_init(&ar_pci->pipe_info[i].intr,
+                            ath10k_pci_ce_tasklet,
+                            (unsigned long)&ar_pci->pipe_info[i]);
+       }
+
+       if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features))
+               num = 1;
+
+       if (num > 1) {
+               ret = ath10k_pci_start_intr_msix(ar, num);
+               if (ret == 0)
+                       goto exit;
+
+               ath10k_warn("MSI-X didn't succeed (%d), trying MSI\n", ret);
+               num = 1;
+       }
+
+       if (num == 1) {
+               ret = ath10k_pci_start_intr_msi(ar);
+               if (ret == 0)
+                       goto exit;
+
+               ath10k_warn("MSI didn't succeed (%d), trying legacy INTR\n",
+                           ret);
+               num = 0;
+       }
+
+       ret = ath10k_pci_start_intr_legacy(ar);
+
+exit:
+       ar_pci->num_msi_intrs = num;
+       ar_pci->ce_count = CE_COUNT;
+       return ret;
+}
+
+static void ath10k_pci_stop_intr(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int i;
+
+       /* There's at least one interrupt regardless of whether it's legacy
+        * INTR, MSI or MSI-X */
+       for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
+               free_irq(ar_pci->pdev->irq + i, ar);
+
+       if (ar_pci->num_msi_intrs > 0)
+               pci_disable_msi(ar_pci->pdev);
+}
+
+static int ath10k_pci_reset_target(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int wait_limit = 300; /* 3 sec */
+
+       /* Wait for Target to finish initialization before we proceed. */
+       iowrite32(PCIE_SOC_WAKE_V_MASK,
+                 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+                 PCIE_SOC_WAKE_ADDRESS);
+
+       ath10k_pci_wait(ar);
+
+       while (wait_limit-- &&
+              !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
+                FW_IND_INITIALIZED)) {
+               if (ar_pci->num_msi_intrs == 0)
+                       /* Fix potential race by repeating CORE_BASE writes */
+                       iowrite32(PCIE_INTR_FIRMWARE_MASK |
+                                 PCIE_INTR_CE_MASK_ALL,
+                                 ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
+                                                PCIE_INTR_ENABLE_ADDRESS));
+               mdelay(10);
+       }
+
+       if (wait_limit < 0) {
+               ath10k_err("Target stalled\n");
+               iowrite32(PCIE_SOC_WAKE_RESET,
+                         ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+                         PCIE_SOC_WAKE_ADDRESS);
+               return -EIO;
+       }
+
+       iowrite32(PCIE_SOC_WAKE_RESET,
+                 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+                 PCIE_SOC_WAKE_ADDRESS);
+
+       return 0;
+}
+
+static void ath10k_pci_device_reset(struct ath10k_pci *ar_pci)
+{
+       struct ath10k *ar = ar_pci->ar;
+       void __iomem *mem = ar_pci->mem;
+       int i;
+       u32 val;
+
+       if (!SOC_GLOBAL_RESET_ADDRESS)
+               return;
+
+       if (!mem)
+               return;
+
+       ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS,
+                              PCIE_SOC_WAKE_V_MASK);
+       for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
+               if (ath10k_pci_target_is_awake(ar))
+                       break;
+               msleep(1);
+       }
+
+       /* Put Target, including PCIe, into RESET. */
+       val = ath10k_pci_reg_read32(mem, SOC_GLOBAL_RESET_ADDRESS);
+       val |= 1;
+       ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
+
+       for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
+               if (ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
+                                         RTC_STATE_COLD_RESET_MASK)
+                       break;
+               msleep(1);
+       }
+
+       /* Pull Target, including PCIe, out of RESET. */
+       val &= ~1;
+       ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
+
+       for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
+               if (!(ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
+                                           RTC_STATE_COLD_RESET_MASK))
+                       break;
+               msleep(1);
+       }
+
+       ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
+}
+
+static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
+{
+       int i;
+
+       for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
+               if (!test_bit(i, ar_pci->features))
+                       continue;
+
+               switch (i) {
+               case ATH10K_PCI_FEATURE_MSI_X:
+                       ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n");
+                       break;
+               case ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND:
+                       ath10k_dbg(ATH10K_DBG_PCI, "QCA988X_1.0 workaround enabled\n");
+                       break;
+               }
+       }
+}
+
+static int ath10k_pci_probe(struct pci_dev *pdev,
+                           const struct pci_device_id *pci_dev)
+{
+       void __iomem *mem;
+       int ret = 0;
+       struct ath10k *ar;
+       struct ath10k_pci *ar_pci;
+       u32 lcr_val;
+
+       ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+
+       ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
+       if (ar_pci == NULL)
+               return -ENOMEM;
+
+       ar_pci->pdev = pdev;
+       ar_pci->dev = &pdev->dev;
+
+       switch (pci_dev->device) {
+       case QCA988X_1_0_DEVICE_ID:
+               set_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features);
+               break;
+       case QCA988X_2_0_DEVICE_ID:
+               set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
+               break;
+       default:
+               ret = -ENODEV;
+               ath10k_err("Unknown device ID: %d\n", pci_dev->device);
+               goto err_ar_pci;
+       }
+
+       ath10k_pci_dump_features(ar_pci);
+
+       ar = ath10k_core_create(ar_pci, ar_pci->dev, ATH10K_BUS_PCI,
+                               &ath10k_pci_hif_ops);
+       if (!ar) {
+               ath10k_err("ath10k_core_create failed!\n");
+               ret = -EINVAL;
+               goto err_ar_pci;
+       }
+
+       /* Enable QCA988X_1.0 HW workarounds */
+       if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features))
+               spin_lock_init(&ar_pci->hw_v1_workaround_lock);
+
+       ar_pci->ar = ar;
+       ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
+       atomic_set(&ar_pci->keep_awake_count, 0);
+
+       pci_set_drvdata(pdev, ar);
+
+       /*
+        * The Target may have been reset or power cycled without the Host's
+        * knowledge, and its Config Space may no longer reflect the PCI
+        * address space that was assigned earlier by the PCI infrastructure.
+        * Refresh it now.
+        */
+       ret = pci_assign_resource(pdev, BAR_NUM);
+       if (ret) {
+               ath10k_err("cannot assign PCI space: %d\n", ret);
+               goto err_ar;
+       }
+
+       ret = pci_enable_device(pdev);
+       if (ret) {
+               ath10k_err("cannot enable PCI device: %d\n", ret);
+               goto err_ar;
+       }
+
+       /* Request MMIO resources */
+       ret = pci_request_region(pdev, BAR_NUM, "ath");
+       if (ret) {
+               ath10k_err("PCI MMIO reservation error: %d\n", ret);
+               goto err_device;
+       }
+
+       /*
+        * Target structures have a limit of 32 bit DMA pointers.
+        * DMA pointers can be wider than 32 bits by default on some systems.
+        */
+       ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+       if (ret) {
+               ath10k_err("32-bit DMA not available: %d\n", ret);
+               goto err_region;
+       }
+
+       ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+       if (ret) {
+               ath10k_err("cannot enable 32-bit consistent DMA: %d\n", ret);
+               goto err_region;
+       }
+
+       /* Set bus master bit in PCI_COMMAND to enable DMA */
+       pci_set_master(pdev);
+
+       /*
+        * Temporary FIX: disable ASPM
+        * Will be removed after the OTP is programmed
+        */
+       pci_read_config_dword(pdev, 0x80, &lcr_val);
+       pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
+
+       /* Arrange for access to Target SoC registers. */
+       mem = pci_iomap(pdev, BAR_NUM, 0);
+       if (!mem) {
+               ath10k_err("PCI iomap error\n");
+               ret = -EIO;
+               goto err_master;
+       }
+
+       ar_pci->mem = mem;
+
+       spin_lock_init(&ar_pci->ce_lock);
+
+       ar_pci->cacheline_sz = dma_get_cache_alignment();
+
+       ret = ath10k_pci_start_intr(ar);
+       if (ret) {
+               ath10k_err("could not start interrupt handling (%d)\n", ret);
+               goto err_iomap;
+       }
+
+       /*
+        * Bring the target up cleanly.
+        *
+        * The target may be in an undefined state with an AUX-powered Target
+        * and a Host in WoW mode. If the Host crashes, loses power, or is
+        * restarted (without unloading the driver) then the Target is left
+        * (aux) powered and running. On a subsequent driver load, the Target
+        * is in an unexpected state. We try to catch that here in order to
+        * reset the Target and retry the probe.
+        */
+       ath10k_pci_device_reset(ar_pci);
+
+       ret = ath10k_pci_reset_target(ar);
+       if (ret)
+               goto err_intr;
+
+       if (ath10k_target_ps) {
+               ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save enabled\n");
+       } else {
+               /* Force AWAKE forever */
+               ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save disabled\n");
+               ath10k_do_pci_wake(ar);
+       }
+
+       ret = ath10k_pci_ce_init(ar);
+       if (ret)
+               goto err_intr;
+
+       ret = ath10k_pci_init_config(ar);
+       if (ret)
+               goto err_ce;
+
+       ret = ath10k_pci_wake_target_cpu(ar);
+       if (ret) {
+               ath10k_err("could not wake up target CPU (%d)\n", ret);
+               goto err_ce;
+       }
+
+       ret = ath10k_core_register(ar);
+       if (ret) {
+               ath10k_err("could not register driver core (%d)\n", ret);
+               goto err_ce;
+       }
+
+       return 0;
+
+err_ce:
+       ath10k_pci_ce_deinit(ar);
+err_intr:
+       ath10k_pci_stop_intr(ar);
+err_iomap:
+       pci_iounmap(pdev, mem);
+err_master:
+       pci_clear_master(pdev);
+err_region:
+       pci_release_region(pdev, BAR_NUM);
+err_device:
+       pci_disable_device(pdev);
+err_ar:
+       pci_set_drvdata(pdev, NULL);
+       ath10k_core_destroy(ar);
+err_ar_pci:
+       /* call HIF PCI free here */
+       kfree(ar_pci);
+
+       return ret;
+}
+
+static void ath10k_pci_remove(struct pci_dev *pdev)
+{
+       struct ath10k *ar = pci_get_drvdata(pdev);
+       struct ath10k_pci *ar_pci;
+
+       ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+
+       if (!ar)
+               return;
+
+       ar_pci = ath10k_pci_priv(ar);
+
+       if (!ar_pci)
+               return;
+
+       tasklet_kill(&ar_pci->msi_fw_err);
+
+       ath10k_core_unregister(ar);
+       ath10k_pci_stop_intr(ar);
+
+       pci_set_drvdata(pdev, NULL);
+       pci_iounmap(pdev, ar_pci->mem);
+       pci_release_region(pdev, BAR_NUM);
+       pci_clear_master(pdev);
+       pci_disable_device(pdev);
+
+       ath10k_core_destroy(ar);
+       kfree(ar_pci);
+}
+
+#if defined(CONFIG_PM_SLEEP)
+
+#define ATH10K_PCI_PM_CONTROL 0x44
+
+static int ath10k_pci_suspend(struct device *device)
+{
+       struct pci_dev *pdev = to_pci_dev(device);
+       struct ath10k *ar = pci_get_drvdata(pdev);
+       struct ath10k_pci *ar_pci;
+       u32 val;
+       int ret, retval;
+
+       ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+
+       if (!ar)
+               return -ENODEV;
+
+       ar_pci = ath10k_pci_priv(ar);
+       if (!ar_pci)
+               return -ENODEV;
+
+       if (ath10k_core_target_suspend(ar))
+               return -EBUSY;
+
+       ret = wait_event_interruptible_timeout(ar->event_queue,
+                                               ar->is_target_paused == true,
+                                               1 * HZ);
+       if (ret < 0) {
+               ath10k_warn("suspend interrupted (%d)\n", ret);
+               retval = ret;
+               goto resume;
+       } else if (ret == 0) {
+               ath10k_warn("suspend timed out - target pause event never came\n");
+               retval = -EIO;
+               goto resume;
+       }
+
+       /*
+        * Reset is_target_paused so the host re-checks it on the next
+        * suspend; otherwise it stays true, the wait above is skipped
+        * and the target asserts because the host is already
+        * suspended.
+        */
+       ar->is_target_paused = false;
+
+       pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
+
+       if ((val & 0x000000ff) != 0x3) {
+               pci_save_state(pdev);
+               pci_disable_device(pdev);
+               pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
+                                      (val & 0xffffff00) | 0x03);
+       }
+
+       return 0;
+resume:
+       ret = ath10k_core_target_resume(ar);
+       if (ret)
+               ath10k_warn("could not resume (%d)\n", ret);
+
+       return retval;
+}
+
+static int ath10k_pci_resume(struct device *device)
+{
+       struct pci_dev *pdev = to_pci_dev(device);
+       struct ath10k *ar = pci_get_drvdata(pdev);
+       struct ath10k_pci *ar_pci;
+       int ret;
+       u32 val;
+
+       ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+
+       if (!ar)
+               return -ENODEV;
+       ar_pci = ath10k_pci_priv(ar);
+
+       if (!ar_pci)
+               return -ENODEV;
+
+       ret = pci_enable_device(pdev);
+       if (ret) {
+               ath10k_warn("cannot enable PCI device: %d\n", ret);
+               return ret;
+       }
+
+       pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
+
+       if ((val & 0x000000ff) != 0) {
+               pci_restore_state(pdev);
+               pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
+                                      val & 0xffffff00);
+               /*
+                * Suspend/Resume resets the PCI configuration space,
+                * so we have to re-disable the RETRY_TIMEOUT register (0x41)
+                * to keep PCI Tx retries from interfering with C3 CPU state
+                */
+               pci_read_config_dword(pdev, 0x40, &val);
+
+               if ((val & 0x0000ff00) != 0)
+                       pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
+       }
+
+       ret = ath10k_core_target_resume(ar);
+       if (ret)
+               ath10k_warn("target resume failed: %d\n", ret);
+
+       return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(ath10k_dev_pm_ops,
+                        ath10k_pci_suspend,
+                        ath10k_pci_resume);
+
+#define ATH10K_PCI_PM_OPS (&ath10k_dev_pm_ops)
+
+#else
+
+#define ATH10K_PCI_PM_OPS NULL
+
+#endif /* CONFIG_PM_SLEEP */
+
+MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
+
+static struct pci_driver ath10k_pci_driver = {
+       .name = "ath10k_pci",
+       .id_table = ath10k_pci_id_table,
+       .probe = ath10k_pci_probe,
+       .remove = ath10k_pci_remove,
+       .driver.pm = ATH10K_PCI_PM_OPS,
+};
+
+static int __init ath10k_pci_init(void)
+{
+       int ret;
+
+       ret = pci_register_driver(&ath10k_pci_driver);
+       if (ret)
+               ath10k_err("pci_register_driver failed [%d]\n", ret);
+
+       return ret;
+}
+module_init(ath10k_pci_init);
+
+static void __exit ath10k_pci_exit(void)
+{
+       pci_unregister_driver(&ath10k_pci_driver);
+}
+
+module_exit(ath10k_pci_exit);
+
+MODULE_AUTHOR("Qualcomm Atheros");
+MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_FW_FILE);
+MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_OTP_FILE);
+MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_BOARD_DATA_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
new file mode 100644 (file)
index 0000000..d2a055a
--- /dev/null
@@ -0,0 +1,355 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _PCI_H_
+#define _PCI_H_
+
+#include <linux/interrupt.h>
+
+#include "hw.h"
+#include "ce.h"
+
+/* FW dump area */
+#define REG_DUMP_COUNT_QCA988X 60
+
+/*
+ * maximum number of bytes that can be handled atomically by DiagRead/DiagWrite
+ */
+#define DIAG_TRANSFER_LIMIT 2048
+
+struct bmi_xfer {
+       struct completion done;
+       bool wait_for_resp;
+       u32 resp_len;
+};
+
+struct ath10k_pci_compl {
+       struct list_head list;
+       int send_or_recv;
+       struct ce_state *ce_state;
+       struct hif_ce_pipe_info *pipe_info;
+       void *transfer_context;
+       unsigned int nbytes;
+       unsigned int transfer_id;
+       unsigned int flags;
+};
+
+/* compl_state.send_or_recv */
+#define HIF_CE_COMPLETE_FREE 0
+#define HIF_CE_COMPLETE_SEND 1
+#define HIF_CE_COMPLETE_RECV 2
+
+/*
+ * PCI-specific Target state
+ *
+ * NOTE: Structure is shared between Host software and Target firmware!
+ *
+ * Much of this may be of interest to the Host so
+ * HOST_INTEREST->hi_interconnect_state points here
+ * (and all members are 32-bit quantities in order to
+ * facilitate Host access). In particular, Host software is
+ * required to initialize pipe_cfg_addr and svc_to_pipe_map.
+ */
+struct pcie_state {
+       /* Pipe configuration Target address */
+       /* NB: ce_pipe_config[CE_COUNT] */
+       u32 pipe_cfg_addr;
+
+       /* Service to pipe map Target address */
+       /* NB: service_to_pipe[PIPE_TO_CE_MAP_CN] */
+       u32 svc_to_pipe_map;
+
+       /* number of MSI interrupts requested */
+       u32 msi_requested;
+
+       /* number of MSI interrupts granted */
+       u32 msi_granted;
+
+       /* Message Signalled Interrupt address */
+       u32 msi_addr;
+
+       /* Base data */
+       u32 msi_data;
+
+       /*
+        * Data for firmware interrupt;
+        * MSI data for other interrupts are
+        * in various SoC registers
+        */
+       u32 msi_fw_intr_data;
+
+       /* PCIE_PWR_METHOD_* */
+       u32 power_mgmt_method;
+
+       /* PCIE_CONFIG_FLAG_* */
+       u32 config_flags;
+};
+
+/* PCIE_CONFIG_FLAG definitions */
+#define PCIE_CONFIG_FLAG_ENABLE_L1  0x0000001
+
+/* Host software's Copy Engine configuration. */
+#define CE_ATTR_FLAGS 0
+
+/*
+ * Configuration information for a Copy Engine pipe.
+ * Passed from Host to Target during startup (one per CE).
+ *
+ * NOTE: Structure is shared between Host software and Target firmware!
+ */
+struct ce_pipe_config {
+       u32 pipenum;
+       u32 pipedir;
+       u32 nentries;
+       u32 nbytes_max;
+       u32 flags;
+       u32 reserved;
+};
+
+/*
+ * Directions for interconnect pipe configuration.
+ * These definitions may be used during configuration and are shared
+ * between Host and Target.
+ *
+ * Pipe Directions are relative to the Host, so PIPEDIR_IN means
+ * "coming IN over air through Target to Host" as with a WiFi Rx operation.
+ * Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air"
+ * as with a WiFi Tx operation. This is somewhat awkward for the "middle-man"
+ * Target since things that are "PIPEDIR_OUT" are coming IN to the Target
+ * over the interconnect.
+ */
+#define PIPEDIR_NONE    0
+#define PIPEDIR_IN      1  /* Target-->Host, WiFi Rx direction */
+#define PIPEDIR_OUT     2  /* Host->Target, WiFi Tx direction */
+#define PIPEDIR_INOUT   3  /* bidirectional */
+
+/* Establish a mapping between a service/direction and a pipe. */
+struct service_to_pipe {
+       u32 service_id;
+       u32 pipedir;
+       u32 pipenum;
+};
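+
+/*
+ * Illustrative sketch only (not part of this patch): how a hypothetical
+ * service-to-pipe entry might look, using the PIPEDIR_* conventions above.
+ * The service id value is a placeholder, not a real service number.
+ *
+ *     static const struct service_to_pipe example_map_entry = {
+ *             .service_id = 0,          /* placeholder service id */
+ *             .pipedir    = PIPEDIR_IN, /* Target-->Host (WiFi Rx) */
+ *             .pipenum    = 2,          /* hypothetical CE pipe */
+ *     };
+ */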
+
+enum ath10k_pci_features {
+       ATH10K_PCI_FEATURE_MSI_X                = 0,
+       ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND    = 1,
+
+       /* keep last */
+       ATH10K_PCI_FEATURE_COUNT
+};
+
+/* Per-pipe state. */
+struct hif_ce_pipe_info {
+       /* Handle of underlying Copy Engine */
+       struct ce_state *ce_hdl;
+
+       /* Our pipe number; facilitates use of pipe_info ptrs. */
+       u8 pipe_num;
+
+       /* Convenience back pointer to hif_ce_state. */
+       struct ath10k *hif_ce_state;
+
+       size_t buf_sz;
+
+       /* protects compl_free and num_send_allowed */
+       spinlock_t pipe_lock;
+
+       /* List of free CE completion slots */
+       struct list_head compl_free;
+
+       /* Limit the number of outstanding send requests. */
+       int num_sends_allowed;
+
+       struct ath10k_pci *ar_pci;
+       struct tasklet_struct intr;
+};
+
+struct ath10k_pci {
+       struct pci_dev *pdev;
+       struct device *dev;
+       struct ath10k *ar;
+       void __iomem *mem;
+       int cacheline_sz;
+
+       DECLARE_BITMAP(features, ATH10K_PCI_FEATURE_COUNT);
+
+       /*
+        * Number of MSI interrupts granted, 0 --> using legacy PCI line
+        * interrupts.
+        */
+       int num_msi_intrs;
+
+       struct tasklet_struct intr_tq;
+       struct tasklet_struct msi_fw_err;
+
+       /* Number of Copy Engines supported */
+       unsigned int ce_count;
+
+       int started;
+
+       atomic_t keep_awake_count;
+       bool verified_awake;
+
+       /* List of CE completions to be processed */
+       struct list_head compl_process;
+
+       /* protects compl_processing and compl_process */
+       spinlock_t compl_lock;
+
+       bool compl_processing;
+
+       struct hif_ce_pipe_info pipe_info[CE_COUNT_MAX];
+
+       struct ath10k_hif_cb msg_callbacks_current;
+
+       /* Target address used to signal a pending firmware event */
+       u32 fw_indicator_address;
+
+       /* Copy Engine used for Diagnostic Accesses */
+       struct ce_state *ce_diag;
+
+       /* FIXME: document what this really protects */
+       spinlock_t ce_lock;
+
+       /* Map CE id to ce_state */
+       struct ce_state *ce_id_to_state[CE_COUNT_MAX];
+
+       /* makes sure that dummy reads are atomic */
+       spinlock_t hw_v1_workaround_lock;
+};
+
+static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
+{
+       return ar->hif.priv;
+}
+
+static inline u32 ath10k_pci_reg_read32(void __iomem *mem, u32 addr)
+{
+       return ioread32(mem + PCIE_LOCAL_BASE_ADDRESS + addr);
+}
+
+static inline void ath10k_pci_reg_write32(void __iomem *mem, u32 addr, u32 val)
+{
+       iowrite32(val, mem + PCIE_LOCAL_BASE_ADDRESS + addr);
+}
+
+#define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
+#define PCIE_WAKE_TIMEOUT 5000 /* 5ms */
+
+#define BAR_NUM 0
+
+#define CDC_WAR_MAGIC_STR   0xceef0000
+#define CDC_WAR_DATA_CE     4
+
+/*
+ * TODO: Should be a function call specific to each Target-type.
+ * This convoluted macro converts from Target CPU Virtual Address Space to CE
+ * Address Space. As part of this process, we conservatively fetch the current
+ * PCIE_BAR. MOST of the time, this should match the upper bits of PCI space
+ * for this device; but that's not guaranteed.
+ */
+#define TARG_CPU_SPACE_TO_CE_SPACE(ar, pci_addr, addr)                 \
+       (((ioread32((pci_addr)+(SOC_CORE_BASE_ADDRESS|                  \
+         CORE_CTRL_ADDRESS)) & 0x7ff) << 21) |                         \
+        0x100000 | ((addr) & 0xfffff))
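+
+/*
+ * Illustrative sketch only (not part of this patch): converting a Target CPU
+ * virtual address into CE address space with the macro above, assuming a
+ * hypothetical u32 targ_cpu_addr and a mapped ar_pci->mem:
+ *
+ *     u32 ce_addr = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
+ *                                              targ_cpu_addr);
+ */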
+
+/* Wait up to this many ms for a Diagnostic Access CE operation to complete */
+#define DIAG_ACCESS_CE_TIMEOUT_MS 10
+
+/*
+ * This API allows the Host to access Target registers directly
+ * and relatively efficiently over PCIe.
+ * This allows the Host to avoid extra overhead associated with
+ * sending a message to firmware and waiting for a response message
+ * from firmware, as is done on other interconnects.
+ *
+ * Yet there is some complexity with direct accesses because the
+ * Target's power state is not known a priori. The Host must issue
+ * special PCIe reads/writes in order to explicitly wake the Target
+ * and to verify that it is awake and will remain awake.
+ *
+ * Usage:
+ *
+ *   Use ath10k_pci_read32 and ath10k_pci_write32 to access Target space.
+ *   These calls must be bracketed by ath10k_pci_wake and
+ *   ath10k_pci_sleep.  A single wake/sleep pair is adequate for
+ *   multiple READ/WRITE operations.
+ *
+ *   Use ath10k_pci_wake to put the Target in a state in
+ *   which it is legal for the Host to directly access it. This
+ *   may involve waking the Target from a low power state, which
+ *   may take up to 2 ms!
+ *
+ *   Use ath10k_pci_sleep to tell the Target that, as far as
+ *   this code path is concerned, it no longer needs to remain
+ *   directly accessible.  Wake/sleep is reference counted, so
+ *   multiple code paths may issue wake/sleep on a single target.
+ */
+static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset,
+                                     u32 value)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       void __iomem *addr = ar_pci->mem;
+
+       if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features)) {
+               unsigned long irq_flags;
+
+               spin_lock_irqsave(&ar_pci->hw_v1_workaround_lock, irq_flags);
+
+               ioread32(addr+offset+4); /* 3rd read prior to write */
+               ioread32(addr+offset+4); /* 2nd read prior to write */
+               ioread32(addr+offset+4); /* 1st read prior to write */
+               iowrite32(value, addr+offset);
+
+               spin_unlock_irqrestore(&ar_pci->hw_v1_workaround_lock,
+                                      irq_flags);
+       } else {
+               iowrite32(value, addr+offset);
+       }
+}
+
+static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+       return ioread32(ar_pci->mem + offset);
+}
+
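+/*
+ * Illustrative sketch only (not part of this patch): a typical direct
+ * register access, bracketed by wake/sleep as described above.  The names
+ * ar, val, REG_OFFSET and SOME_BIT are hypothetical and used purely for
+ * the example.
+ *
+ *     ath10k_pci_wake(ar);
+ *     val = ath10k_pci_read32(ar, REG_OFFSET);
+ *     ath10k_pci_write32(ar, REG_OFFSET, val | SOME_BIT);
+ *     ath10k_pci_sleep(ar);
+ */
+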
+extern unsigned int ath10k_target_ps;
+
+void ath10k_do_pci_wake(struct ath10k *ar);
+void ath10k_do_pci_sleep(struct ath10k *ar);
+
+static inline void ath10k_pci_wake(struct ath10k *ar)
+{
+       if (ath10k_target_ps)
+               ath10k_do_pci_wake(ar);
+}
+
+static inline void ath10k_pci_sleep(struct ath10k *ar)
+{
+       if (ath10k_target_ps)
+               ath10k_do_pci_sleep(ar);
+}
+
+#endif /* _PCI_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
new file mode 100644 (file)
index 0000000..bfec6c8
--- /dev/null
@@ -0,0 +1,990 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _RX_DESC_H_
+#define _RX_DESC_H_
+
+enum rx_attention_flags {
+       RX_ATTENTION_FLAGS_FIRST_MPDU          = 1 << 0,
+       RX_ATTENTION_FLAGS_LAST_MPDU           = 1 << 1,
+       RX_ATTENTION_FLAGS_MCAST_BCAST         = 1 << 2,
+       RX_ATTENTION_FLAGS_PEER_IDX_INVALID    = 1 << 3,
+       RX_ATTENTION_FLAGS_PEER_IDX_TIMEOUT    = 1 << 4,
+       RX_ATTENTION_FLAGS_POWER_MGMT          = 1 << 5,
+       RX_ATTENTION_FLAGS_NON_QOS             = 1 << 6,
+       RX_ATTENTION_FLAGS_NULL_DATA           = 1 << 7,
+       RX_ATTENTION_FLAGS_MGMT_TYPE           = 1 << 8,
+       RX_ATTENTION_FLAGS_CTRL_TYPE           = 1 << 9,
+       RX_ATTENTION_FLAGS_MORE_DATA           = 1 << 10,
+       RX_ATTENTION_FLAGS_EOSP                = 1 << 11,
+       RX_ATTENTION_FLAGS_U_APSD_TRIGGER      = 1 << 12,
+       RX_ATTENTION_FLAGS_FRAGMENT            = 1 << 13,
+       RX_ATTENTION_FLAGS_ORDER               = 1 << 14,
+       RX_ATTENTION_FLAGS_CLASSIFICATION      = 1 << 15,
+       RX_ATTENTION_FLAGS_OVERFLOW_ERR        = 1 << 16,
+       RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR     = 1 << 17,
+       RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL = 1 << 18,
+       RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL      = 1 << 19,
+       RX_ATTENTION_FLAGS_SA_IDX_INVALID      = 1 << 20,
+       RX_ATTENTION_FLAGS_DA_IDX_INVALID      = 1 << 21,
+       RX_ATTENTION_FLAGS_SA_IDX_TIMEOUT      = 1 << 22,
+       RX_ATTENTION_FLAGS_DA_IDX_TIMEOUT      = 1 << 23,
+       RX_ATTENTION_FLAGS_ENCRYPT_REQUIRED    = 1 << 24,
+       RX_ATTENTION_FLAGS_DIRECTED            = 1 << 25,
+       RX_ATTENTION_FLAGS_BUFFER_FRAGMENT     = 1 << 26,
+       RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR     = 1 << 27,
+       RX_ATTENTION_FLAGS_TKIP_MIC_ERR        = 1 << 28,
+       RX_ATTENTION_FLAGS_DECRYPT_ERR         = 1 << 29,
+       RX_ATTENTION_FLAGS_FCS_ERR             = 1 << 30,
+       RX_ATTENTION_FLAGS_MSDU_DONE           = 1 << 31,
+};
+
+struct rx_attention {
+       __le32 flags; /* %RX_ATTENTION_FLAGS_ */
+} __packed;
+
+/*
+ * first_mpdu
+ *             Indicates the first MSDU of the PPDU.  If both first_mpdu
+ *             and last_mpdu are set in the MSDU then this is not an
+ *             A-MPDU frame but a stand-alone MPDU.  Interior MPDUs in an
+ *             A-MPDU shall have both first_mpdu and last_mpdu bits set to
+ *             0.  The PPDU start status will only be valid when this bit
+ *             is set.
+ *
+ * last_mpdu
+ *             Indicates the last MSDU of the last MPDU of the PPDU.  The
+ *             PPDU end status will only be valid when this bit is set.
+ *
+ * mcast_bcast
+ *             Multicast / broadcast indicator.  Only set when the MAC
+ *             address 1 bit 0 is set indicating mcast/bcast and the BSSID
+ *             matches one of the 4 BSSID registers. Only set when
+ *             first_msdu is set.
+ *
+ * peer_idx_invalid
+ *             Indicates no matching entries within the max search
+ *             count.  Only set when first_msdu is set.
+ *
+ * peer_idx_timeout
+ *             Indicates an unsuccessful search for the peer index due to
+ *             timeout.  Only set when first_msdu is set.
+ *
+ * power_mgmt
+ *             Power management bit set in the 802.11 header.  Only set
+ *             when first_msdu is set.
+ *
+ * non_qos
+ *             Set if the packet is not a QoS data frame.  Only set when
+ *             first_msdu is set.
+ *
+ * null_data
+ *             Set if frame type indicates either null data or QoS null
+ *             data format.  Only set when first_msdu is set.
+ *
+ * mgmt_type
+ *             Set if packet is a management packet.  Only set when
+ *             first_msdu is set.
+ *
+ * ctrl_type
+ *             Set if packet is a control packet.  Only set when first_msdu
+ *             is set.
+ *
+ * more_data
+ *             Set if more bit in frame control is set.  Only set when
+ *             first_msdu is set.
+ *
+ * eosp
+ *             Set if the EOSP (end of service period) bit in the QoS
+ *             control field is set.  Only set when first_msdu is set.
+ *
+ * u_apsd_trigger
+ *             Set if packet is U-APSD trigger.  Key table will have bits
+ *             per TID to indicate U-APSD trigger.
+ *
+ * fragment
+ *             Indicates that this is an 802.11 fragment frame.  This is
+ *             set when either the more_frag bit is set in the frame
+ *             control or the fragment number is not zero.  Only set when
+ *             first_msdu is set.
+ *
+ * order
+ *             Set if the order bit in the frame control is set.  Only set
+ *             when first_msdu is set.
+ *
+ * classification
+ *             Indicates that this status has a corresponding MSDU that
+ *             requires FW processing.  The OLE will have classification
+ *             ring mask registers which will indicate the ring(s) for
+ *             packets and descriptors which need FW attention.
+ *
+ * overflow_err
+ *             PCU Receive FIFO does not have enough space to store the
+ *             full receive packet.  Enough space is reserved in the
+ *             receive FIFO for the status to be written.  This MPDU and
+ *             the remaining packets in the PPDU will be filtered and no
+ *             Ack response will be transmitted.
+ *
+ * msdu_length_err
+ *             Indicates that the MSDU length from the 802.3 encapsulated
+ *             length field extends beyond the MPDU boundary.
+ *
+ * tcp_udp_chksum_fail
+ *             Indicates that the computed checksum (tcp_udp_chksum) did
+ *             not match the checksum in the TCP/UDP header.
+ *
+ * ip_chksum_fail
+ *             Indicates that the computed checksum did not match the
+ *             checksum in the IP header.
+ *
+ * sa_idx_invalid
+ *             Indicates no matching entry was found in the address search
+ *             table for the source MAC address.
+ *
+ * da_idx_invalid
+ *             Indicates no matching entry was found in the address search
+ *             table for the destination MAC address.
+ *
+ * sa_idx_timeout
+ *             Indicates an unsuccessful search for the source MAC address
+ *             due to the expiring of the search timer.
+ *
+ * da_idx_timeout
+ *             Indicates an unsuccessful search for the destination MAC
+ *             address due to the expiring of the search timer.
+ *
+ * encrypt_required
+ *             Indicates that this data type frame is not encrypted even if
+ *             the policy for this MPDU requires encryption as indicated in
+ *             the peer table key type.
+ *
+ * directed
+ *             MPDU is a directed packet which means that the RA matched
+ *             our STA addresses.  In proxySTA it means that the TA matched
+ *             an entry in our address search table with the corresponding
+ *             'no_ack' bit in the address search entry cleared.
+ *
+ * buffer_fragment
+ *             Indicates that at least one of the rx buffers has been
+ *             fragmented.  If set the FW should look at the rx_frag_info
+ *             descriptor described below.
+ *
+ * mpdu_length_err
+ *             Indicates that the MPDU was prematurely terminated
+ *             resulting in a truncated MPDU.  Don't trust the MPDU length
+ *             field.
+ *
+ * tkip_mic_err
+ *             Indicates that the MPDU Michael integrity check failed
+ *
+ * decrypt_err
+ *             Indicates that the MPDU decrypt integrity check failed
+ *
+ * fcs_err
+ *             Indicates that the MPDU FCS check failed
+ *
+ * msdu_done
+ *             If set indicates that the RX packet data, RX header data, RX
+ *             PPDU start descriptor, RX MPDU start/end descriptor, RX MSDU
+ *             start/end descriptors and RX Attention descriptor are all
+ *             valid.  This bit must be in the last octet of the
+ *             descriptor.
+ */
+
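+/*
+ * Illustrative sketch only (not part of this patch): testing one of the
+ * attention flags documented above from the little-endian flags word of a
+ * hypothetical struct rx_attention *attn:
+ *
+ *     if (__le32_to_cpu(attn->flags) & RX_ATTENTION_FLAGS_MSDU_DONE)
+ *             ;  /* all RX descriptors for this MSDU are valid */
+ */
+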
+struct rx_frag_info {
+       u8 ring0_more_count;
+       u8 ring1_more_count;
+       u8 ring2_more_count;
+       u8 ring3_more_count;
+} __packed;
+
+/*
+ * ring0_more_count
+ *             Indicates the number of more buffers associated with RX DMA
+ *             ring 0.  Field is filled in by the RX_DMA.
+ *
+ * ring1_more_count
+ *             Indicates the number of more buffers associated with RX DMA
+ *             ring 1. Field is filled in by the RX_DMA.
+ *
+ * ring2_more_count
+ *             Indicates the number of more buffers associated with RX DMA
+ *             ring 2. Field is filled in by the RX_DMA.
+ *
+ * ring3_more_count
+ *             Indicates the number of more buffers associated with RX DMA
+ *             ring 3. Field is filled in by the RX_DMA.
+ */
+
+enum htt_rx_mpdu_encrypt_type {
+       HTT_RX_MPDU_ENCRYPT_WEP40            = 0,
+       HTT_RX_MPDU_ENCRYPT_WEP104           = 1,
+       HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC = 2,
+       HTT_RX_MPDU_ENCRYPT_WEP128           = 3,
+       HTT_RX_MPDU_ENCRYPT_TKIP_WPA         = 4,
+       HTT_RX_MPDU_ENCRYPT_WAPI             = 5,
+       HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2     = 6,
+       HTT_RX_MPDU_ENCRYPT_NONE             = 7,
+};
+
+#define RX_MPDU_START_INFO0_PEER_IDX_MASK     0x000007ff
+#define RX_MPDU_START_INFO0_PEER_IDX_LSB      0
+#define RX_MPDU_START_INFO0_SEQ_NUM_MASK      0x0fff0000
+#define RX_MPDU_START_INFO0_SEQ_NUM_LSB       16
+#define RX_MPDU_START_INFO0_ENCRYPT_TYPE_MASK 0xf0000000
+#define RX_MPDU_START_INFO0_ENCRYPT_TYPE_LSB  28
+#define RX_MPDU_START_INFO0_FROM_DS           (1 << 11)
+#define RX_MPDU_START_INFO0_TO_DS             (1 << 12)
+#define RX_MPDU_START_INFO0_ENCRYPTED         (1 << 13)
+#define RX_MPDU_START_INFO0_RETRY             (1 << 14)
+#define RX_MPDU_START_INFO0_TXBF_H_INFO       (1 << 15)
+
+#define RX_MPDU_START_INFO1_TID_MASK 0xf0000000
+#define RX_MPDU_START_INFO1_TID_LSB  28
+#define RX_MPDU_START_INFO1_DIRECTED (1 << 16)
+
+struct rx_mpdu_start {
+       __le32 info0;
+       union {
+               struct {
+                       __le32 pn31_0;
+                       __le32 info1; /* %RX_MPDU_START_INFO1_ */
+               } __packed;
+               struct {
+                       u8 pn[6];
+               } __packed;
+       } __packed;
+} __packed;
+
+/*
+ * peer_idx
+ *             The index of the address search table which is associated with
+ *             the peer table entry corresponding to this MPDU.  Only valid
+ *             when first_msdu is set.
+ *
+ * fr_ds
+ *             Set if the from DS bit is set in the frame control.  Only
+ *             valid when first_msdu is set.
+ *
+ * to_ds
+ *             Set if the to DS bit is set in the frame control.  Only
+ *             valid when first_msdu is set.
+ *
+ * encrypted
+ *             Protected bit from the frame control.  Only valid when
+ *             first_msdu is set.
+ *
+ * retry
+ *             Retry bit from the frame control.  Only valid when
+ *             first_msdu is set.
+ *
+ * txbf_h_info
+ *             The MPDU data will contain H information.  Primarily used
+ *             for debug.
+ *
+ * seq_num
+ *             The sequence number from the 802.11 header.  Only valid when
+ *             first_msdu is set.
+ *
+ * encrypt_type
+ *             Indicates type of decrypt cipher used (as defined in the
+ *             peer table)
+ *             0: WEP40
+ *             1: WEP104
+ *             2: TKIP without MIC
+ *             3: WEP128
+ *             4: TKIP (WPA)
+ *             5: WAPI
+ *             6: AES-CCM (WPA2)
+ *             7: No cipher
+ *             Only valid when first_msdu is set.
+ *
+ * pn_31_0
+ *             Bits [31:0] of the PN number extracted from the IV field
+ *             WEP: IV = {key_id_octet, pn2, pn1, pn0}.  Only pn[23:0] is
+ *             valid.
+ *             TKIP: IV = {pn5, pn4, pn3, pn2, key_id_octet, pn0,
+ *             WEPSeed[1], pn1}.  Only pn[47:0] is valid.
+ *             AES-CCM: IV = {pn5, pn4, pn3, pn2, key_id_octet, 0x0, pn1,
+ *             pn0}.  Only pn[47:0] is valid.
+ *             WAPI: IV = {key_id_octet, 0x0, pn15, pn14, pn13, pn12, pn11,
+ *             pn10, pn9, pn8, pn7, pn6, pn5, pn4, pn3, pn2, pn1, pn0}.
+ *             The ext_wapi_pn[127:48] in the rx_msdu_misc descriptor and
+ *             pn[47:0] are valid.
+ *             Only valid when first_msdu is set.
+ *
+ * pn_47_32
+ *             Bits [47:32] of the PN number.   See description for
+ *             pn_31_0.  The remaining PN fields are in the rx_msdu_end
+ *             descriptor
+ *
+ * pn
+ *             Use this field to access the pn without worrying about
+ *             byte-order and bitmasking/bitshifting.
+ *
+ * directed
+ *             See definition in RX attention descriptor
+ *
+ * reserved_2
+ *             Reserved: HW should fill with zero.  FW should ignore.
+ *
+ * tid
+ *             The TID field in the QoS control field
+ */
+
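+/*
+ * Illustrative sketch only (not part of this patch): extracting the peer
+ * index and sequence number with the RX_MPDU_START_INFO0_* mask/LSB pairs,
+ * for a hypothetical struct rx_mpdu_start *mpdu:
+ *
+ *     u32 info0 = __le32_to_cpu(mpdu->info0);
+ *     u32 peer_idx = (info0 & RX_MPDU_START_INFO0_PEER_IDX_MASK) >>
+ *                    RX_MPDU_START_INFO0_PEER_IDX_LSB;
+ *     u32 seq_num = (info0 & RX_MPDU_START_INFO0_SEQ_NUM_MASK) >>
+ *                   RX_MPDU_START_INFO0_SEQ_NUM_LSB;
+ */
+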
+#define RX_MPDU_END_INFO0_RESERVED_0_MASK     0x00001fff
+#define RX_MPDU_END_INFO0_RESERVED_0_LSB      0
+#define RX_MPDU_END_INFO0_POST_DELIM_CNT_MASK 0x0fff0000
+#define RX_MPDU_END_INFO0_POST_DELIM_CNT_LSB  16
+#define RX_MPDU_END_INFO0_OVERFLOW_ERR        (1 << 13)
+#define RX_MPDU_END_INFO0_LAST_MPDU           (1 << 14)
+#define RX_MPDU_END_INFO0_POST_DELIM_ERR      (1 << 15)
+#define RX_MPDU_END_INFO0_MPDU_LENGTH_ERR     (1 << 28)
+#define RX_MPDU_END_INFO0_TKIP_MIC_ERR        (1 << 29)
+#define RX_MPDU_END_INFO0_DECRYPT_ERR         (1 << 30)
+#define RX_MPDU_END_INFO0_FCS_ERR             (1 << 31)
+
+struct rx_mpdu_end {
+       __le32 info0;
+} __packed;
+
+/*
+ * reserved_0
+ *             Reserved
+ *
+ * overflow_err
+ *             PCU Receive FIFO does not have enough space to store the
+ *             full receive packet.  Enough space is reserved in the
+ *             receive FIFO for the status to be written.  This MPDU and
+ *             the remaining packets in the PPDU will be filtered and no
+ *             Ack response will be transmitted.
+ *
+ * last_mpdu
+ *             Indicates that this is the last MPDU of a PPDU.
+ *
+ * post_delim_err
+ *             Indicates that a delimiter FCS error occurred after this
+ *             MPDU before the next MPDU.  Only valid when last_msdu is
+ *             set.
+ *
+ * post_delim_cnt
+ *             Count of the delimiters after this MPDU.  This requires the
+ *             last MPDU to be held until all the EOF descriptors have been
+ *             received.  This may be inefficient in the future when
+ *             ML-MIMO is used.  Only valid when last_mpdu is set.
+ *
+ * mpdu_length_err
+ *             See definition in RX attention descriptor
+ *
+ * tkip_mic_err
+ *             See definition in RX attention descriptor
+ *
+ * decrypt_err
+ *             See definition in RX attention descriptor
+ *
+ * fcs_err
+ *             See definition in RX attention descriptor
+ */
+
+#define RX_MSDU_START_INFO0_MSDU_LENGTH_MASK    0x00003fff
+#define RX_MSDU_START_INFO0_MSDU_LENGTH_LSB     0
+#define RX_MSDU_START_INFO0_IP_OFFSET_MASK      0x000fc000
+#define RX_MSDU_START_INFO0_IP_OFFSET_LSB       14
+#define RX_MSDU_START_INFO0_RING_MASK_MASK      0x00f00000
+#define RX_MSDU_START_INFO0_RING_MASK_LSB       20
+#define RX_MSDU_START_INFO0_TCP_UDP_OFFSET_MASK 0x7f000000
+#define RX_MSDU_START_INFO0_TCP_UDP_OFFSET_LSB  24
+
+#define RX_MSDU_START_INFO1_MSDU_NUMBER_MASK    0x000000ff
+#define RX_MSDU_START_INFO1_MSDU_NUMBER_LSB     0
+#define RX_MSDU_START_INFO1_DECAP_FORMAT_MASK   0x00000300
+#define RX_MSDU_START_INFO1_DECAP_FORMAT_LSB    8
+#define RX_MSDU_START_INFO1_SA_IDX_MASK         0x07ff0000
+#define RX_MSDU_START_INFO1_SA_IDX_LSB          16
+#define RX_MSDU_START_INFO1_IPV4_PROTO          (1 << 10)
+#define RX_MSDU_START_INFO1_IPV6_PROTO          (1 << 11)
+#define RX_MSDU_START_INFO1_TCP_PROTO           (1 << 12)
+#define RX_MSDU_START_INFO1_UDP_PROTO           (1 << 13)
+#define RX_MSDU_START_INFO1_IP_FRAG             (1 << 14)
+#define RX_MSDU_START_INFO1_TCP_ONLY_ACK        (1 << 15)
+
+enum rx_msdu_decap_format {
+       RX_MSDU_DECAP_RAW           = 0,
+       RX_MSDU_DECAP_NATIVE_WIFI   = 1,
+       RX_MSDU_DECAP_ETHERNET2_DIX = 2,
+       RX_MSDU_DECAP_8023_SNAP_LLC = 3
+};
+
+struct rx_msdu_start {
+       __le32 info0; /* %RX_MSDU_START_INFO0_ */
+       __le32 flow_id_crc;
+       __le32 info1; /* %RX_MSDU_START_INFO1_ */
+} __packed;
+
+/*
+ * msdu_length
+ *             MSDU length in bytes after decapsulation.  This field is
+ *             still valid for MPDU frames without A-MSDU.  It still
+ *             represents MSDU length after decapsulation
+ *
+ * ip_offset
+ *             Indicates the IP offset in bytes from the start of the
+ *             packet after decapsulation.  Only valid if ipv4_proto or
+ *             ipv6_proto is set.
+ *
+ * ring_mask
+ *             Indicates the destination RX rings for this MSDU.
+ *
+ * tcp_udp_offset
+ *             Indicates the offset in bytes to the start of TCP or UDP
+ *             header from the start of the IP header after decapsulation.
+ *             Only valid if tcp_proto or udp_proto is set.  The value 0
+ *             indicates that the offset is longer than 127 bytes.
+ *
+ * reserved_0c
+ *             Reserved: HW should fill with zero.  FW should ignore.
+ *
+ * flow_id_crc
+ *             The flow_id_crc runs CRC32 on the following information:
+ *             IPv4 option: dest_addr[31:0], src_addr [31:0], {24'b0,
+ *             protocol[7:0]}.
+ *             IPv6 option: dest_addr[127:0], src_addr [127:0], {24'b0,
+ *             next_header[7:0]}
+ *             UDP case: src_port[15:0], dest_port[15:0]
+ *             TCP case: src_port[15:0], dest_port[15:0],
+ *             {header_length[3:0], 6'b0, flags[5:0], window_size[15:0]},
+ *             {16'b0, urgent_ptr[15:0]}, all options except 32-bit
+ *             timestamp.
+ *
+ * msdu_number
+ *             Indicates the MSDU number within an MPDU.  This value is
+ *             reset to zero at the start of each MPDU.  If the number of
+ *             MSDU exceeds 255 this number will wrap using modulo 256.
+ *
+ * decap_format
+ *             Indicates the format after decapsulation:
+ *             0: RAW: No decapsulation
+ *             1: Native WiFi
+ *             2: Ethernet 2 (DIX)
+ *             3: 802.3 (SNAP/LLC)
+ *
+ * ipv4_proto
+ *             Set if L2 layer indicates IPv4 protocol.
+ *
+ * ipv6_proto
+ *             Set if L2 layer indicates IPv6 protocol.
+ *
+ * tcp_proto
+ *             Set if the ipv4_proto or ipv6_proto are set and the IP
+ *             protocol indicates TCP.
+ *
+ * udp_proto
+ *             Set if the ipv4_proto or ipv6_proto are set and the IP
+ *             protocol indicates UDP.
+ *
+ * ip_frag
+ *             Indicates that either the IP More frag bit is set or IP frag
+ *             number is non-zero.  If set indicates that this is a
+ *             fragmented IP packet.
+ *
+ * tcp_only_ack
+ *             Set if only the TCP Ack bit is set in the TCP flags and if
+ *             the TCP payload is 0.
+ *
+ * sa_idx
+ *             The offset in the address table which matches the MAC source
+ *             address.
+ *
+ * reserved_2b
+ *             Reserved: HW should fill with zero.  FW should ignore.
+ */
+
+#define RX_MSDU_END_INFO0_REPORTED_MPDU_LENGTH_MASK 0x00003fff
+#define RX_MSDU_END_INFO0_REPORTED_MPDU_LENGTH_LSB  0
+#define RX_MSDU_END_INFO0_FIRST_MSDU                (1 << 14)
+#define RX_MSDU_END_INFO0_LAST_MSDU                 (1 << 15)
+#define RX_MSDU_END_INFO0_PRE_DELIM_ERR             (1 << 30)
+#define RX_MSDU_END_INFO0_RESERVED_3B               (1 << 31)
+
+struct rx_msdu_end {
+       __le16 ip_hdr_cksum;
+       __le16 tcp_hdr_cksum;
+       u8 key_id_octet;
+       u8 classification_filter;
+       u8 wapi_pn[10];
+       __le32 info0;
+} __packed;
+
+/*
+ *ip_hdr_chksum
+ *             This can include the IP header checksum or the pseudo header
+ *             checksum used by TCP/UDP checksum.
+ *
+ *tcp_udp_chksum
+ *             The value of the computed TCP/UDP checksum.  A mode bit
+ *             selects whether this checksum is the full checksum or the
+ *             partial checksum which does not include the pseudo header.
+ *
+ *key_id_octet
+ *             The key ID octet from the IV.  Only valid when first_msdu is
+ *             set.
+ *
+ *classification_filter
+ *             Indicates the classification filter rule number.
+ *
+ *ext_wapi_pn_63_48
+ *             Extension PN (packet number) which is only used by WAPI.
+ *             This corresponds to WAPI PN bits [63:48] (pn6 and pn7).  The
+ *             WAPI PN bits [63:0] are in the pn field of the rx_mpdu_start
+ *             descriptor.
+ *
+ *ext_wapi_pn_95_64
+ *             Extension PN (packet number) which is only used by WAPI.
+ *             This corresponds to WAPI PN bits [95:64] (pn8, pn9, pn10 and
+ *             pn11).
+ *
+ *ext_wapi_pn_127_96
+ *             Extension PN (packet number) which is only used by WAPI.
+ *             This corresponds to WAPI PN bits [127:96] (pn12, pn13, pn14,
+ *             pn15).
+ *
+ *reported_mpdu_length
+ *             MPDU length before decapsulation.  Only valid when
+ *             first_msdu is set.  This field is taken directly from the
+ *             length field of the A-MPDU delimiter or the preamble length
+ *             field for non-A-MPDU frames.
+ *
+ *first_msdu
+ *             Indicates the first MSDU of an A-MSDU.  If both first_msdu
+ *             and last_msdu are set in the MSDU then this is a
+ *             non-aggregated MSDU frame: normal MPDU.  Interior MSDUs in an
+ *             A-MSDU shall have both first_msdu and last_msdu bits set to 0.
+ *
+ *last_msdu
+ *             Indicates the last MSDU of the A-MSDU.  MPDU end status is
+ *             only valid when last_msdu is set.
+ *
+ *reserved_3a
+ *             Reserved: HW should fill with zero.  FW should ignore.
+ *
+ *pre_delim_err
+ *             Indicates that the first delimiter had a FCS failure.  Only
+ *             valid when first_mpdu and first_msdu are set.
+ *
+ *reserved_3b
+ *             Reserved: HW should fill with zero.  FW should ignore.
+ */
+
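+/*
+ * Usage sketch (illustrative only, not part of the descriptor layout):
+ * given a pointer "end" to a completed struct rx_msdu_end, the mask/LSB
+ * pairs above are applied to the little-endian info0 word like this:
+ *
+ *     u32 info0 = __le32_to_cpu(end->info0);
+ *     u16 mpdu_len = (info0 & RX_MSDU_END_INFO0_REPORTED_MPDU_LENGTH_MASK)
+ *                    >> RX_MSDU_END_INFO0_REPORTED_MPDU_LENGTH_LSB;
+ *     bool first = !!(info0 & RX_MSDU_END_INFO0_FIRST_MSDU);
+ *     bool last = !!(info0 & RX_MSDU_END_INFO0_LAST_MSDU);
+ */
+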
+#define RX_PPDU_START_SIG_RATE_SELECT_OFDM 0
+#define RX_PPDU_START_SIG_RATE_SELECT_CCK  1
+
+#define RX_PPDU_START_SIG_RATE_OFDM_48 0
+#define RX_PPDU_START_SIG_RATE_OFDM_24 1
+#define RX_PPDU_START_SIG_RATE_OFDM_12 2
+#define RX_PPDU_START_SIG_RATE_OFDM_6  3
+#define RX_PPDU_START_SIG_RATE_OFDM_54 4
+#define RX_PPDU_START_SIG_RATE_OFDM_36 5
+#define RX_PPDU_START_SIG_RATE_OFDM_18 6
+#define RX_PPDU_START_SIG_RATE_OFDM_9  7
+
+#define RX_PPDU_START_SIG_RATE_CCK_LP_11  0
+#define RX_PPDU_START_SIG_RATE_CCK_LP_5_5 1
+#define RX_PPDU_START_SIG_RATE_CCK_LP_2   2
+#define RX_PPDU_START_SIG_RATE_CCK_LP_1   3
+#define RX_PPDU_START_SIG_RATE_CCK_SP_11  4
+#define RX_PPDU_START_SIG_RATE_CCK_SP_5_5 5
+#define RX_PPDU_START_SIG_RATE_CCK_SP_2   6
+
+#define HTT_RX_PPDU_START_PREAMBLE_LEGACY        0x04
+#define HTT_RX_PPDU_START_PREAMBLE_HT            0x08
+#define HTT_RX_PPDU_START_PREAMBLE_HT_WITH_TXBF  0x09
+#define HTT_RX_PPDU_START_PREAMBLE_VHT           0x0C
+#define HTT_RX_PPDU_START_PREAMBLE_VHT_WITH_TXBF 0x0D
+
+#define RX_PPDU_START_INFO0_IS_GREENFIELD (1 << 0)
+
+#define RX_PPDU_START_INFO1_L_SIG_RATE_MASK    0x0000000f
+#define RX_PPDU_START_INFO1_L_SIG_RATE_LSB     0
+#define RX_PPDU_START_INFO1_L_SIG_LENGTH_MASK  0x0001ffe0
+#define RX_PPDU_START_INFO1_L_SIG_LENGTH_LSB   5
+#define RX_PPDU_START_INFO1_L_SIG_TAIL_MASK    0x00fc0000
+#define RX_PPDU_START_INFO1_L_SIG_TAIL_LSB     18
+#define RX_PPDU_START_INFO1_PREAMBLE_TYPE_MASK 0xff000000
+#define RX_PPDU_START_INFO1_PREAMBLE_TYPE_LSB  24
+#define RX_PPDU_START_INFO1_L_SIG_RATE_SELECT  (1 << 4)
+#define RX_PPDU_START_INFO1_L_SIG_PARITY       (1 << 17)
+
+#define RX_PPDU_START_INFO2_HT_SIG_VHT_SIG_A_1_MASK 0x00ffffff
+#define RX_PPDU_START_INFO2_HT_SIG_VHT_SIG_A_1_LSB  0
+
+#define RX_PPDU_START_INFO3_HT_SIG_VHT_SIG_A_2_MASK 0x00ffffff
+#define RX_PPDU_START_INFO3_HT_SIG_VHT_SIG_A_2_LSB  0
+#define RX_PPDU_START_INFO3_TXBF_H_INFO             (1 << 24)
+
+#define RX_PPDU_START_INFO4_VHT_SIG_B_MASK 0x1fffffff
+#define RX_PPDU_START_INFO4_VHT_SIG_B_LSB  0
+
+#define RX_PPDU_START_INFO5_SERVICE_MASK 0x0000ffff
+#define RX_PPDU_START_INFO5_SERVICE_LSB  0
+
+struct rx_ppdu_start {
+       struct {
+               u8 pri20_mhz;
+               u8 ext20_mhz;
+               u8 ext40_mhz;
+               u8 ext80_mhz;
+       } rssi_chains[4];
+       u8 rssi_comb;
+       __le16 rsvd0;
+       u8 info0; /* %RX_PPDU_START_INFO0_ */
+       __le32 info1; /* %RX_PPDU_START_INFO1_ */
+       __le32 info2; /* %RX_PPDU_START_INFO2_ */
+       __le32 info3; /* %RX_PPDU_START_INFO3_ */
+       __le32 info4; /* %RX_PPDU_START_INFO4_ */
+       __le32 info5; /* %RX_PPDU_START_INFO5_ */
+} __packed;
+
+/*
+ * rssi_chain0_pri20
+ *             RSSI of RX PPDU on chain 0 of primary 20 MHz bandwidth.
+ *             Value of 0x80 indicates invalid.
+ *
+ * rssi_chain0_sec20
+ *             RSSI of RX PPDU on chain 0 of secondary 20 MHz bandwidth.
+ *             Value of 0x80 indicates invalid.
+ *
+ * rssi_chain0_sec40
+ *             RSSI of RX PPDU on chain 0 of secondary 40 MHz bandwidth.
+ *             Value of 0x80 indicates invalid.
+ *
+ * rssi_chain0_sec80
+ *             RSSI of RX PPDU on chain 0 of secondary 80 MHz bandwidth.
+ *             Value of 0x80 indicates invalid.
+ *
+ * rssi_chain1_pri20
+ *             RSSI of RX PPDU on chain 1 of primary 20 MHz bandwidth.
+ *             Value of 0x80 indicates invalid.
+ *
+ * rssi_chain1_sec20
+ *             RSSI of RX PPDU on chain 1 of secondary 20 MHz bandwidth.
+ *             Value of 0x80 indicates invalid.
+ *
+ * rssi_chain1_sec40
+ *             RSSI of RX PPDU on chain 1 of secondary 40 MHz bandwidth.
+ *             Value of 0x80 indicates invalid.
+ *
+ * rssi_chain1_sec80
+ *             RSSI of RX PPDU on chain 1 of secondary 80 MHz bandwidth.
+ *             Value of 0x80 indicates invalid.
+ *
+ * rssi_chain2_pri20
+ *             RSSI of RX PPDU on chain 2 of primary 20 MHz bandwidth.
+ *             Value of 0x80 indicates invalid.
+ *
+ * rssi_chain2_sec20
+ *             RSSI of RX PPDU on chain 2 of secondary 20 MHz bandwidth.
+ *             Value of 0x80 indicates invalid.
+ *
+ * rssi_chain2_sec40
+ *             RSSI of RX PPDU on chain 2 of secondary 40 MHz bandwidth.
+ *             Value of 0x80 indicates invalid.
+ *
+ * rssi_chain2_sec80
+ *             RSSI of RX PPDU on chain 2 of secondary 80 MHz bandwidth.
+ *             Value of 0x80 indicates invalid.
+ *
+ * rssi_chain3_pri20
+ *             RSSI of RX PPDU on chain 3 of primary 20 MHz bandwidth.
+ *             Value of 0x80 indicates invalid.
+ *
+ * rssi_chain3_sec20
+ *             RSSI of RX PPDU on chain 3 of secondary 20 MHz bandwidth.
+ *             Value of 0x80 indicates invalid.
+ *
+ * rssi_chain3_sec40
+ *             RSSI of RX PPDU on chain 3 of secondary 40 MHz bandwidth.
+ *             Value of 0x80 indicates invalid.
+ *
+ * rssi_chain3_sec80
+ *             RSSI of RX PPDU on chain 3 of secondary 80 MHz bandwidth.
+ *             Value of 0x80 indicates invalid.
+ *
+ * rssi_comb
+ *             The combined RSSI of RX PPDU of all active chains and
+ *             bandwidths.  Value of 0x80 indicates invalid.
+ *
+ * reserved_4a
+ *             Reserved: HW should fill with 0, FW should ignore.
+ *
+ * is_greenfield
+ *             Do we really support this?
+ *
+ * reserved_4b
+ *             Reserved: HW should fill with 0, FW should ignore.
+ *
+ * l_sig_rate
+ *             If l_sig_rate_select is 0:
+ *             0x8: OFDM 48 Mbps
+ *             0x9: OFDM 24 Mbps
+ *             0xA: OFDM 12 Mbps
+ *             0xB: OFDM 6 Mbps
+ *             0xC: OFDM 54 Mbps
+ *             0xD: OFDM 36 Mbps
+ *             0xE: OFDM 18 Mbps
+ *             0xF: OFDM 9 Mbps
+ *             If l_sig_rate_select is 1:
+ *             0x8: CCK 11 Mbps long preamble
+ *             0x9: CCK 5.5 Mbps long preamble
+ *             0xA: CCK 2 Mbps long preamble
+ *             0xB: CCK 1 Mbps long preamble
+ *             0xC: CCK 11 Mbps short preamble
+ *             0xD: CCK 5.5 Mbps short preamble
+ *             0xE: CCK 2 Mbps short preamble
+ *
+ * l_sig_rate_select
+ *             Legacy signal rate select.  If set then l_sig_rate indicates
+ *             CCK rates.  If clear then l_sig_rate indicates OFDM rates.
+ *
+ * l_sig_length
+ *             Length of legacy frame in octets.
+ *
+ * l_sig_parity
+ *             Odd parity over l_sig_rate and l_sig_length
+ *
+ * l_sig_tail
+ *             Tail bits for Viterbi decoder
+ *
+ * preamble_type
+ *             Indicates the type of preamble ahead:
+ *             0x4: Legacy (OFDM/CCK)
+ *             0x8: HT
+ *             0x9: HT with TxBF
+ *             0xC: VHT
+ *             0xD: VHT with TxBF
+ *             0x80 - 0xFF: Reserved for special baseband data types such
+ *             as radar and spectral scan.
+ *
+ * ht_sig_vht_sig_a_1
+ *             If preamble_type == 0x8 or 0x9
+ *             HT-SIG (first 24 bits)
+ *             If preamble_type == 0xC or 0xD
+ *             VHT-SIG A (first 24 bits)
+ *             Else
+ *             Reserved
+ *
+ * reserved_6
+ *             Reserved: HW should fill with 0, FW should ignore.
+ *
+ * ht_sig_vht_sig_a_2
+ *             If preamble_type == 0x8 or 0x9
+ *             HT-SIG (last 24 bits)
+ *             If preamble_type == 0xC or 0xD
+ *             VHT-SIG A (last 24 bits)
+ *             Else
+ *             Reserved
+ *
+ * txbf_h_info
+ *             Indicates that the packet data carries H information which
+ *             is used for TxBF debug.
+ *
+ * reserved_7
+ *             Reserved: HW should fill with 0, FW should ignore.
+ *
+ * vht_sig_b
+ *             WiFi 1.0 and WiFi 2.0 will likely have this field set to all
+ *             0s since the BB does not plan on decoding VHT SIG-B.
+ *
+ * reserved_8
+ *             Reserved: HW should fill with 0, FW should ignore.
+ *
+ * service
+ *             Service field from BB for OFDM, HT and VHT packets.  CCK
+ *             packets will have service field of 0.
+ *
+ * reserved_9
+ *             Reserved: HW should fill with 0, FW should ignore.
+ */
+
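+/*
+ * Usage sketch (illustrative only): decoding the legacy signal fields out
+ * of the little-endian info1 word of a struct rx_ppdu_start pointed to by
+ * "start":
+ *
+ *     u32 info1 = __le32_to_cpu(start->info1);
+ *     u8 l_rate = (info1 & RX_PPDU_START_INFO1_L_SIG_RATE_MASK)
+ *                 >> RX_PPDU_START_INFO1_L_SIG_RATE_LSB;
+ *     bool cck = !!(info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT);
+ *     u8 preamble = (info1 & RX_PPDU_START_INFO1_PREAMBLE_TYPE_MASK)
+ *                   >> RX_PPDU_START_INFO1_PREAMBLE_TYPE_LSB;
+ */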
+
+#define RX_PPDU_END_FLAGS_PHY_ERR             (1 << 0)
+#define RX_PPDU_END_FLAGS_RX_LOCATION         (1 << 1)
+#define RX_PPDU_END_FLAGS_TXBF_H_INFO         (1 << 2)
+
+#define RX_PPDU_END_INFO0_RX_ANTENNA_MASK     0x00ffffff
+#define RX_PPDU_END_INFO0_RX_ANTENNA_LSB      0
+#define RX_PPDU_END_INFO0_FLAGS_TX_HT_VHT_ACK (1 << 24)
+#define RX_PPDU_END_INFO0_BB_CAPTURED_CHANNEL (1 << 25)
+
+#define RX_PPDU_END_INFO1_PPDU_DONE (1 << 15)
+
+struct rx_ppdu_end {
+       __le32 evm_p0;
+       __le32 evm_p1;
+       __le32 evm_p2;
+       __le32 evm_p3;
+       __le32 evm_p4;
+       __le32 evm_p5;
+       __le32 evm_p6;
+       __le32 evm_p7;
+       __le32 evm_p8;
+       __le32 evm_p9;
+       __le32 evm_p10;
+       __le32 evm_p11;
+       __le32 evm_p12;
+       __le32 evm_p13;
+       __le32 evm_p14;
+       __le32 evm_p15;
+       __le32 tsf_timestamp;
+       __le32 wb_timestamp;
+       u8 locationing_timestamp;
+       u8 phy_err_code;
+       __le16 flags; /* %RX_PPDU_END_FLAGS_ */
+       __le32 info0; /* %RX_PPDU_END_INFO0_ */
+       __le16 bb_length;
+       __le16 info1; /* %RX_PPDU_END_INFO1_ */
+} __packed;
+
+/*
+ * evm_p0
+ *             EVM for pilot 0.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p1
+ *             EVM for pilot 1.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p2
+ *             EVM for pilot 2.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p3
+ *             EVM for pilot 3.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p4
+ *             EVM for pilot 4.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p5
+ *             EVM for pilot 5.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p6
+ *             EVM for pilot 6.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p7
+ *             EVM for pilot 7.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p8
+ *             EVM for pilot 8.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p9
+ *             EVM for pilot 9.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p10
+ *             EVM for pilot 10.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p11
+ *             EVM for pilot 11.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p12
+ *             EVM for pilot 12.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p13
+ *             EVM for pilot 13.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p14
+ *             EVM for pilot 14.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p15
+ *             EVM for pilot 15.  Contain EVM for streams: 0, 1, 2 and 3.
+ *
+ * tsf_timestamp
+ *             Receive TSF timestamp sampled on the rising edge of
+ *             rx_clear.  For PHY errors this may be the current TSF when
+ *             phy_error is asserted if the rx_clear does not assert before
+ *             the end of the PHY error.
+ *
+ * wb_timestamp
+ *             WLAN/BT timestamp is a 1 usec resolution timestamp which
+ *             does not get updated based on receive beacon like TSF.  The
+ *             same rules for capturing tsf_timestamp are used to capture
+ *             the wb_timestamp.
+ *
+ * locationing_timestamp
+ *             Timestamp used for locationing.  This timestamp is used to
+ *             indicate fractions of usec.  For example if the MAC clock is
+ *             running at 80 MHz, the timestamp will increment every 12.5
+ *             nsec.  The value starts at 0 and increments to 79 and
+ *             returns to 0 and repeats.  This information is valid for
+ *             every PPDU.  This information can be used in conjunction
+ *             with wb_timestamp to capture large delta times.
+ *
+ * phy_err_code
+ *             See section 1.10.8.1.2 for the list of PHY error codes.
+ *
+ * phy_err
+ *             Indicates a PHY error was detected for this PPDU.
+ *
+ * rx_location
+ *             Indicates that location information was requested.
+ *
+ * txbf_h_info
+ *             Indicates that the packet data carries H information which
+ *             is used for TxBF debug.
+ *
+ * reserved_18
+ *             Reserved: HW should fill with 0, FW should ignore.
+ *
+ * rx_antenna
+ *             Receive antenna value
+ *
+ * tx_ht_vht_ack
+ *             Indicates that a HT or VHT Ack/BA frame was transmitted in
+ *             response to this receive packet.
+ *
+ * bb_captured_channel
+ *             Indicates that the BB has captured a channel dump.  FW can
+ *             then read the channel dump memory.  This may indicate that
+ *             the channel was captured either based on PCU setting the
+ *             capture_channel bit in the BB descriptor or FW setting the
+ *             capture_channel mode bit.
+ *
+ * reserved_19
+ *             Reserved: HW should fill with 0, FW should ignore.
+ *
+ * bb_length
+ *             Indicates the number of bytes of baseband information for
+ *             PPDUs where the BB descriptor preamble type is 0x80 to 0xFF
+ *             which indicates that this is not a normal PPDU but rather
+ *             contains baseband debug information.
+ *
+ * reserved_20
+ *             Reserved: HW should fill with 0, FW should ignore.
+ *
+ * ppdu_done
+ *             PPDU end status is only valid when ppdu_done bit is set.
+ *             Every time HW sets this bit in memory FW/SW must clear this
+ *             bit in memory.  FW will initialize all the ppdu_done dword
+ *             to 0.
+ */
+
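+/*
+ * Usage sketch (illustrative only): the PPDU end status is polled through
+ * the little-endian flags/info1 words of a struct rx_ppdu_end pointed to
+ * by "end":
+ *
+ *     bool done = !!(__le16_to_cpu(end->info1) & RX_PPDU_END_INFO1_PPDU_DONE);
+ *     bool phy_err = !!(__le16_to_cpu(end->flags) & RX_PPDU_END_FLAGS_PHY_ERR);
+ *     u8 err_code = end->phy_err_code;        /* meaningful only if phy_err */
+ */
+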
+#define FW_RX_DESC_INFO0_DISCARD  (1 << 0)
+#define FW_RX_DESC_INFO0_FORWARD  (1 << 1)
+#define FW_RX_DESC_INFO0_INSPECT  (1 << 5)
+#define FW_RX_DESC_INFO0_EXT_MASK 0xC0
+#define FW_RX_DESC_INFO0_EXT_LSB  6
+
+struct fw_rx_desc_base {
+       u8 info0;
+} __packed;
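+
+/*
+ * Usage sketch (illustrative only): the FW RX descriptor action bits are
+ * read directly from the single info0 byte of a struct fw_rx_desc_base
+ * pointed to by "fw_desc":
+ *
+ *     u8 info0 = fw_desc->info0;
+ *     bool discard = !!(info0 & FW_RX_DESC_INFO0_DISCARD);
+ *     bool forward = !!(info0 & FW_RX_DESC_INFO0_FORWARD);
+ *     bool inspect = !!(info0 & FW_RX_DESC_INFO0_INSPECT);
+ *     u8 ext = (info0 & FW_RX_DESC_INFO0_EXT_MASK) >> FW_RX_DESC_INFO0_EXT_LSB;
+ */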
+
+#endif /* _RX_DESC_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
new file mode 100644 (file)
index 0000000..be7ba1e
--- /dev/null
@@ -0,0 +1,449 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __TARGADDRS_H__
+#define __TARGADDRS_H__
+
+/*
+ * xxx_HOST_INTEREST_ADDRESS is the address in Target RAM of the
+ * host_interest structure.  It must match the address of the _host_interest
+ * symbol (see linker script).
+ *
+ * Host Interest is shared between Host and Target in order to coordinate
+ * between the two, and is intended to remain constant (with additions only
+ * at the end) across software releases.
+ *
+ * All addresses are available here so that it's possible to
+ * write a single binary that works with all Target Types.
+ * May be used in assembler code as well as C.
+ */
+#define QCA988X_HOST_INTEREST_ADDRESS    0x00400800
+#define HOST_INTEREST_MAX_SIZE          0x200
+
+/*
+ * These are items that the Host may need to access via BMI or via the
+ * Diagnostic Window. The position of items in this structure must remain
+ * constant across firmware revisions! Types for each item must be fixed
+ * size across target and host platforms. More items may be added at the end.
+ */
+struct host_interest {
+       /*
+        * Pointer to application-defined area, if any.
+        * Set by Target application during startup.
+        */
+       u32 hi_app_host_interest;                       /* 0x00 */
+
+       /* Pointer to register dump area, valid after Target crash. */
+       u32 hi_failure_state;                           /* 0x04 */
+
+       /* Pointer to debug logging header */
+       u32 hi_dbglog_hdr;                              /* 0x08 */
+
+       u32 hi_unused0c;                                /* 0x0c */
+
+       /*
+        * General-purpose flag bits, similar to SOC_OPTION_* flags.
+        * Can be used by application rather than by OS.
+        */
+       u32 hi_option_flag;                             /* 0x10 */
+
+       /*
+        * Boolean that determines whether or not to
+        * display messages on the serial port.
+        */
+       u32 hi_serial_enable;                           /* 0x14 */
+
+       /* Start address of DataSet index, if any */
+       u32 hi_dset_list_head;                          /* 0x18 */
+
+       /* Override Target application start address */
+       u32 hi_app_start;                               /* 0x1c */
+
+       /* Clock and voltage tuning */
+       u32 hi_skip_clock_init;                         /* 0x20 */
+       u32 hi_core_clock_setting;                      /* 0x24 */
+       u32 hi_cpu_clock_setting;                       /* 0x28 */
+       u32 hi_system_sleep_setting;                    /* 0x2c */
+       u32 hi_xtal_control_setting;                    /* 0x30 */
+       u32 hi_pll_ctrl_setting_24ghz;                  /* 0x34 */
+       u32 hi_pll_ctrl_setting_5ghz;                   /* 0x38 */
+       u32 hi_ref_voltage_trim_setting;                /* 0x3c */
+       u32 hi_clock_info;                              /* 0x40 */
+
+       /* Host uses BE CPU or not */
+       u32 hi_be;                                      /* 0x44 */
+
+       u32 hi_stack;   /* normal stack */                      /* 0x48 */
+       u32 hi_err_stack; /* error stack */             /* 0x4c */
+       u32 hi_desired_cpu_speed_hz;                    /* 0x50 */
+
+       /* Pointer to Board Data  */
+       u32 hi_board_data;                              /* 0x54 */
+
+       /*
+        * Indication of Board Data state:
+        *    0: board data is not yet initialized.
+        *    1: board data is initialized; unknown size
+        *   >1: number of bytes of initialized board data
+        */
+       u32 hi_board_data_initialized;                  /* 0x58 */
+
+       u32 hi_dset_ram_index_table;                    /* 0x5c */
+
+       u32 hi_desired_baud_rate;                       /* 0x60 */
+       u32 hi_dbglog_config;                           /* 0x64 */
+       u32 hi_end_ram_reserve_sz;                      /* 0x68 */
+       u32 hi_mbox_io_block_sz;                        /* 0x6c */
+
+       u32 hi_num_bpatch_streams;                      /* 0x70 -- unused */
+       u32 hi_mbox_isr_yield_limit;                    /* 0x74 */
+
+       u32 hi_refclk_hz;                               /* 0x78 */
+       u32 hi_ext_clk_detected;                        /* 0x7c */
+       u32 hi_dbg_uart_txpin;                          /* 0x80 */
+       u32 hi_dbg_uart_rxpin;                          /* 0x84 */
+       u32 hi_hci_uart_baud;                           /* 0x88 */
+       u32 hi_hci_uart_pin_assignments;                /* 0x8C */
+
+       u32 hi_hci_uart_baud_scale_val;                 /* 0x90 */
+       u32 hi_hci_uart_baud_step_val;                  /* 0x94 */
+
+       u32 hi_allocram_start;                          /* 0x98 */
+       u32 hi_allocram_sz;                             /* 0x9c */
+       u32 hi_hci_bridge_flags;                        /* 0xa0 */
+       u32 hi_hci_uart_support_pins;                   /* 0xa4 */
+
+       u32 hi_hci_uart_pwr_mgmt_params;                /* 0xa8 */
+
+       /*
+        * 0xa8 - [1]: 0 = UART FC active low, 1 = UART FC active high
+        *        [31:16]: wakeup timeout in ms
+        */
+       /* Pointer to extended board Data  */
+       u32 hi_board_ext_data;                          /* 0xac */
+       u32 hi_board_ext_data_config;                   /* 0xb0 */
+       /*
+        * Bit [0]    : valid
+        * Bit [31:16]: size
+        */
+       /*
+        * hi_reset_flag is used to trigger specific actions on target
+        * reset, such as restoring app_start after a warm reset, or
+        * preserving the host interest area, ROM data, literals, etc.
+        */
+       u32  hi_reset_flag;                             /* 0xb4 */
+       /* indicate hi_reset_flag is valid */
+       u32  hi_reset_flag_valid;                       /* 0xb8 */
+       u32 hi_hci_uart_pwr_mgmt_params_ext;            /* 0xbc */
+       /* 0xbc - [31:0]: idle timeout in ms */
+       /* ACS flags */
+       u32 hi_acs_flags;                               /* 0xc0 */
+       u32 hi_console_flags;                           /* 0xc4 */
+       u32 hi_nvram_state;                             /* 0xc8 */
+       u32 hi_option_flag2;                            /* 0xcc */
+
+       /* If non-zero, override values sent to Host in WMI_READY event. */
+       u32 hi_sw_version_override;                     /* 0xd0 */
+       u32 hi_abi_version_override;                    /* 0xd4 */
+
+       /*
+        * Percentage of high priority RX traffic to total expected RX traffic
+        * applicable only to ar6004
+        */
+       u32 hi_hp_rx_traffic_ratio;                     /* 0xd8 */
+
+       /* test applications flags */
+       u32 hi_test_apps_related;                       /* 0xdc */
+       /* location of test script */
+       u32 hi_ota_testscript;                          /* 0xe0 */
+       /* location of CAL data */
+       u32 hi_cal_data;                                /* 0xe4 */
+
+       /* Number of packet log buffers */
+       u32 hi_pktlog_num_buffers;                      /* 0xe8 */
+
+       /* wow extension configuration */
+       u32 hi_wow_ext_config;                          /* 0xec */
+       u32 hi_pwr_save_flags;                          /* 0xf0 */
+
+       /* Spatial Multiplexing Power Save (SMPS) options */
+       u32 hi_smps_options;                            /* 0xf4 */
+
+       /* Interconnect-specific state */
+       u32 hi_interconnect_state;                      /* 0xf8 */
+
+       /* Coex configuration flags */
+       u32 hi_coex_config;                             /* 0xfc */
+
+       /* Early allocation support */
+       u32 hi_early_alloc;                             /* 0x100 */
+       /* FW swap field */
+       /*
+        * Bits of this 32bit word will be used to pass specific swap
+        * instruction to FW
+        */
+       /*
+        * Bit 0 -- AP Nart descriptor no swap. When this bit is set
+        * FW will not swap TX descriptor. Meaning packets are formed
+        * on the target processor.
+        */
+       /* Bit 1 - unused */
+       u32 hi_fw_swap;                                 /* 0x104 */
+} __packed;
+
+#define HI_ITEM(item)  offsetof(struct host_interest, item)
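+
+/*
+ * Usage sketch (illustrative only): the target RAM address of a
+ * host_interest member is the per-chip HOST_INTEREST_ADDRESS plus the
+ * member offset, e.g. for hi_board_data on QCA988X:
+ *
+ *     u32 addr = QCA988X_HOST_INTEREST_ADDRESS + HI_ITEM(hi_board_data);
+ */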
+
+/* Bits defined in hi_option_flag */
+
+/* Enable timer workaround */
+#define HI_OPTION_TIMER_WAR         0x01
+/* Limit BMI command credits */
+#define HI_OPTION_BMI_CRED_LIMIT    0x02
+/* Relay Dot11 hdr to/from host */
+#define HI_OPTION_RELAY_DOT11_HDR   0x04
+/* MAC addr method: 0 - locally administered, 1 - globally unique addrs */
+#define HI_OPTION_MAC_ADDR_METHOD   0x08
+/* Firmware Bridging */
+#define HI_OPTION_FW_BRIDGE         0x10
+/* Enable CPU profiling */
+#define HI_OPTION_ENABLE_PROFILE    0x20
+/* Disable debug logging */
+#define HI_OPTION_DISABLE_DBGLOG    0x40
+/* Skip Era Tracking */
+#define HI_OPTION_SKIP_ERA_TRACKING 0x80
+/* Disable PAPRD (debug) */
+#define HI_OPTION_PAPRD_DISABLE     0x100
+#define HI_OPTION_NUM_DEV_LSB       0x200
+#define HI_OPTION_NUM_DEV_MSB       0x800
+#define HI_OPTION_DEV_MODE_LSB      0x1000
+#define HI_OPTION_DEV_MODE_MSB      0x8000000
+/* Disable LowFreq Timer Stabilization */
+#define HI_OPTION_NO_LFT_STBL       0x10000000
+/* Skip regulatory scan */
+#define HI_OPTION_SKIP_REG_SCAN     0x20000000
+/*
+ * Do regulatory scan during init before
+ * sending WMI ready event to host
+ */
+#define HI_OPTION_INIT_REG_SCAN     0x40000000
+
+/* REV6: Do not adjust memory map */
+#define HI_OPTION_SKIP_MEMMAP       0x80000000
+
+#define HI_OPTION_MAC_ADDR_METHOD_SHIFT 3
+
+/* 2 bits of hi_option_flag are used to represent 4 modes */
+#define HI_OPTION_FW_MODE_IBSS    0x0 /* IBSS Mode */
+#define HI_OPTION_FW_MODE_BSS_STA 0x1 /* STA Mode */
+#define HI_OPTION_FW_MODE_AP      0x2 /* AP Mode */
+#define HI_OPTION_FW_MODE_BT30AMP 0x3 /* BT30 AMP Mode */
+
+/* 2 bits of hi_option_flag are used to represent 4 submodes */
+#define HI_OPTION_FW_SUBMODE_NONE    0x0  /* Normal mode */
+#define HI_OPTION_FW_SUBMODE_P2PDEV  0x1  /* p2p device mode */
+#define HI_OPTION_FW_SUBMODE_P2PCLIENT 0x2 /* p2p client mode */
+#define HI_OPTION_FW_SUBMODE_P2PGO   0x3 /* p2p go mode */
+
+/* Num dev Mask */
+#define HI_OPTION_NUM_DEV_MASK    0x7
+#define HI_OPTION_NUM_DEV_SHIFT   0x9
+
+/* firmware bridging */
+#define HI_OPTION_FW_BRIDGE_SHIFT 0x04
+
+/*
+Fw Mode/SubMode Mask
+|-----------------------------------------------------------------------------|
+|  SUB   |   SUB   |   SUB   |  SUB    |         |         |         |        |
+|MODE[3] | MODE[2] | MODE[1] | MODE[0] | MODE[3] | MODE[2] | MODE[1] | MODE[0]|
+|  (2)   |   (2)   |   (2)   |   (2)   |   (2)   |   (2)   |   (2)   |   (2)  |
+|-----------------------------------------------------------------------------|
+*/
+#define HI_OPTION_FW_MODE_BITS         0x2
+#define HI_OPTION_FW_MODE_MASK         0x3
+#define HI_OPTION_FW_MODE_SHIFT        0xC
+#define HI_OPTION_ALL_FW_MODE_MASK     0xFF
+
+#define HI_OPTION_FW_SUBMODE_BITS      0x2
+#define HI_OPTION_FW_SUBMODE_MASK      0x3
+#define HI_OPTION_FW_SUBMODE_SHIFT     0x14
+#define HI_OPTION_ALL_FW_SUBMODE_MASK  0xFF00
+#define HI_OPTION_ALL_FW_SUBMODE_SHIFT 0x8
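+
+/*
+ * Usage sketch (illustrative only, one plausible reading of the layout
+ * above): encoding the mode and submode of the first device into
+ * hi_option_flag:
+ *
+ *     u32 opt = 0;
+ *     opt |= (HI_OPTION_FW_MODE_BSS_STA & HI_OPTION_FW_MODE_MASK)
+ *            << HI_OPTION_FW_MODE_SHIFT;
+ *     opt |= (HI_OPTION_FW_SUBMODE_P2PDEV & HI_OPTION_FW_SUBMODE_MASK)
+ *            << HI_OPTION_FW_SUBMODE_SHIFT;
+ */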
+
+
+/* hi_option_flag2 options */
+#define HI_OPTION_OFFLOAD_AMSDU     0x01
+#define HI_OPTION_DFS_SUPPORT       0x02 /* Enable DFS support */
+#define HI_OPTION_ENABLE_RFKILL     0x04 /* RFKill Enable Feature */
+#define HI_OPTION_RADIO_RETENTION_DISABLE 0x08 /* Disable radio retention */
+#define HI_OPTION_EARLY_CFG_DONE    0x10 /* Early configuration is complete */
+
+#define HI_OPTION_RF_KILL_SHIFT     0x2
+#define HI_OPTION_RF_KILL_MASK      0x1
+
+/* hi_reset_flag */
+/* preserve App Start address */
+#define HI_RESET_FLAG_PRESERVE_APP_START         0x01
+/* preserve host interest */
+#define HI_RESET_FLAG_PRESERVE_HOST_INTEREST     0x02
+/* preserve ROM data */
+#define HI_RESET_FLAG_PRESERVE_ROMDATA           0x04
+#define HI_RESET_FLAG_PRESERVE_NVRAM_STATE       0x08
+#define HI_RESET_FLAG_PRESERVE_BOOT_INFO         0x10
+#define HI_RESET_FLAG_WARM_RESET       0x20
+
+/* define hi_fw_swap bits */
+#define HI_DESC_IN_FW_BIT      0x01
+
+/* indicate the reset flag is valid */
+#define HI_RESET_FLAG_IS_VALID  0x12345678
+
+/* ACS is enabled */
+#define HI_ACS_FLAGS_ENABLED        (1 << 0)
+/* Use physical WWAN device */
+#define HI_ACS_FLAGS_USE_WWAN       (1 << 1)
+/* Use test VAP */
+#define HI_ACS_FLAGS_TEST_VAP       (1 << 2)
+
+/*
+ * CONSOLE FLAGS
+ *
+ * Bit Range  Meaning
+ * ---------  --------------------------------
+ *   2..0     UART ID (0 = Default)
+ *    3       Baud Select (0 = 9600, 1 = 115200)
+ *   30..4    Reserved
+ *    31      Enable Console
+ *
+ */
+
+#define HI_CONSOLE_FLAGS_ENABLE       (1 << 31)
+#define HI_CONSOLE_FLAGS_UART_MASK    (0x7)
+#define HI_CONSOLE_FLAGS_UART_SHIFT   0
+#define HI_CONSOLE_FLAGS_BAUD_SELECT  (1 << 3)
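+
+/*
+ * Usage sketch (illustrative only): enabling the console on UART 0 at
+ * 115200 baud would compose hi_console_flags as:
+ *
+ *     u32 console = HI_CONSOLE_FLAGS_ENABLE |
+ *                   HI_CONSOLE_FLAGS_BAUD_SELECT |
+ *                   ((0 << HI_CONSOLE_FLAGS_UART_SHIFT) &
+ *                    HI_CONSOLE_FLAGS_UART_MASK);
+ */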
+
+/* SM power save options */
+#define HI_SMPS_ALLOW_MASK            (0x00000001)
+#define HI_SMPS_MODE_MASK             (0x00000002)
+#define HI_SMPS_MODE_STATIC           (0x00000000)
+#define HI_SMPS_MODE_DYNAMIC          (0x00000002)
+#define HI_SMPS_DISABLE_AUTO_MODE     (0x00000004)
+#define HI_SMPS_DATA_THRESH_MASK      (0x000007f8)
+#define HI_SMPS_DATA_THRESH_SHIFT     (3)
+#define HI_SMPS_RSSI_THRESH_MASK      (0x0007f800)
+#define HI_SMPS_RSSI_THRESH_SHIFT     (11)
+#define HI_SMPS_LOWPWR_CM_MASK        (0x00380000)
+#define HI_SMPS_LOWPWR_CM_SHIFT       (15)
+#define HI_SMPS_HIPWR_CM_MASK         (0x03c00000)
+#define HI_SMPS_HIPWR_CM_SHIFT        (19)
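+
+/*
+ * Usage sketch (illustrative only): allowing dynamic SMPS with a data
+ * threshold of 10 would compose hi_smps_options as:
+ *
+ *     u32 smps = HI_SMPS_ALLOW_MASK | HI_SMPS_MODE_DYNAMIC |
+ *                ((10 << HI_SMPS_DATA_THRESH_SHIFT) &
+ *                 HI_SMPS_DATA_THRESH_MASK);
+ */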
+
+/*
+ * WOW Extension configuration
+ *
+ * Bit Range  Meaning
+ * ---------  --------------------------------
+ *   8..0     Size of each WOW pattern (max 511)
+ *   15..9    Number of patterns per list (max 127)
+ *   17..16   Number of lists (max 4)
+ *   30..18   Reserved
+ *   31       Enabled
+ *
+ *  Set values (except enable) to zero for default settings.
+ */
+
+#define HI_WOW_EXT_ENABLED_MASK        (1 << 31)
+#define HI_WOW_EXT_NUM_LIST_SHIFT      16
+#define HI_WOW_EXT_NUM_LIST_MASK       (0x3 << HI_WOW_EXT_NUM_LIST_SHIFT)
+#define HI_WOW_EXT_NUM_PATTERNS_SHIFT  9
+#define HI_WOW_EXT_NUM_PATTERNS_MASK   (0x7F << HI_WOW_EXT_NUM_PATTERNS_SHIFT)
+#define HI_WOW_EXT_PATTERN_SIZE_SHIFT  0
+#define HI_WOW_EXT_PATTERN_SIZE_MASK   (0x1FF << HI_WOW_EXT_PATTERN_SIZE_SHIFT)
+
+#define HI_WOW_EXT_MAKE_CONFIG(num_lists, count, size) \
+       ((((num_lists) << HI_WOW_EXT_NUM_LIST_SHIFT) & \
+               HI_WOW_EXT_NUM_LIST_MASK) | \
+       (((count) << HI_WOW_EXT_NUM_PATTERNS_SHIFT) & \
+               HI_WOW_EXT_NUM_PATTERNS_MASK) | \
+       (((size) << HI_WOW_EXT_PATTERN_SIZE_SHIFT) & \
+               HI_WOW_EXT_PATTERN_SIZE_MASK))
+
+#define HI_WOW_EXT_GET_NUM_LISTS(config) \
+       (((config) & HI_WOW_EXT_NUM_LIST_MASK) >> HI_WOW_EXT_NUM_LIST_SHIFT)
+#define HI_WOW_EXT_GET_NUM_PATTERNS(config) \
+       (((config) & HI_WOW_EXT_NUM_PATTERNS_MASK) >> \
+               HI_WOW_EXT_NUM_PATTERNS_SHIFT)
+#define HI_WOW_EXT_GET_PATTERN_SIZE(config) \
+       (((config) & HI_WOW_EXT_PATTERN_SIZE_MASK) >> \
+               HI_WOW_EXT_PATTERN_SIZE_SHIFT)
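+
+/*
+ * Usage sketch (illustrative only): configuring 2 lists of 16 patterns of
+ * up to 128 bytes each and enabling the extension:
+ *
+ *     u32 cfg = HI_WOW_EXT_MAKE_CONFIG(2, 16, 128) | HI_WOW_EXT_ENABLED_MASK;
+ *
+ * HI_WOW_EXT_GET_NUM_LISTS(cfg), HI_WOW_EXT_GET_NUM_PATTERNS(cfg) and
+ * HI_WOW_EXT_GET_PATTERN_SIZE(cfg) recover 2, 16 and 128 respectively.
+ */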
+
+/*
+ * Early allocation configuration
+ * Supports RAM bank configuration before BMI is done, which eases memory
+ * allocation at a very early stage.
+ *
+ * Bit Range  Meaning
+ * ---------  ----------------------------------
+ * [0:3]      number of banks assigned to be IRAM
+ * [4:15]     reserved
+ * [16:31]    magic number
+ *
+ * Note:
+ * 1. Target firmware checks the magic number and, if it matches, considers
+ *    bits [0:15] valid and uses them to calculate the end of DRAM.  Early
+ *    allocation is then located in that area and may be reclaimed when
+ *    necessary.
+ * 2. If no magic number is found, early allocation happens at the "_end"
+ *    symbol of ROM, which is located before the app-data and might NOT be
+ *    reclaimable.  If this is adopted, the linker script should keep this
+ *    in mind to avoid data corruption.
+ */
+#define HI_EARLY_ALLOC_MAGIC           0x6d8a
+#define HI_EARLY_ALLOC_MAGIC_MASK      0xffff0000
+#define HI_EARLY_ALLOC_MAGIC_SHIFT     16
+#define HI_EARLY_ALLOC_IRAM_BANKS_MASK 0x0000000f
+#define HI_EARLY_ALLOC_IRAM_BANKS_SHIFT        0
+
+#define HI_EARLY_ALLOC_VALID() \
+       ((((HOST_INTEREST->hi_early_alloc) & HI_EARLY_ALLOC_MAGIC_MASK) >> \
+       HI_EARLY_ALLOC_MAGIC_SHIFT) == (HI_EARLY_ALLOC_MAGIC))
+#define HI_EARLY_ALLOC_GET_IRAM_BANKS() \
+       (((HOST_INTEREST->hi_early_alloc) & HI_EARLY_ALLOC_IRAM_BANKS_MASK) \
+       >> HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
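+
+/*
+ * Usage sketch (illustrative only): a hi_early_alloc value requesting two
+ * IRAM banks with the magic number in place:
+ *
+ *     u32 early_alloc = (HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) |
+ *                       (2 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT);
+ */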
+
+/* power save flag bit definitions */
+#define HI_PWR_SAVE_LPL_ENABLED   0x1
+/* b1-b3 reserved */
+/* b4-b5: dev0 LPL type: 0 - none
+ *                       1 - Reduce Pwr Search
+ *                       2 - Reduce Pwr Listen */
+/* b6-b7: dev1 LPL type and so on for max 8 devices */
+#define HI_PWR_SAVE_LPL_DEV0_LSB   4
+#define HI_PWR_SAVE_LPL_DEV_MASK   0x3
+/* power save related utility macros */
+#define HI_LPL_ENABLED() \
+       ((HOST_INTEREST->hi_pwr_save_flags & HI_PWR_SAVE_LPL_ENABLED))
+#define HI_DEV_LPL_TYPE_GET(_devix) \
+       (HOST_INTEREST->hi_pwr_save_flags & ((HI_PWR_SAVE_LPL_DEV_MASK) << \
+        (HI_PWR_SAVE_LPL_DEV0_LSB + (_devix)*2)))
+
+#define HOST_INTEREST_SMPS_IS_ALLOWED() \
+       ((HOST_INTEREST->hi_smps_options & HI_SMPS_ALLOW_MASK))
+
+/* Reserve 1024 bytes for extended board data */
+#define QCA988X_BOARD_DATA_SZ     7168
+#define QCA988X_BOARD_EXT_DATA_SZ 0
+
+#endif /* __TARGADDRS_H__ */
diff --git a/drivers/net/wireless/ath/ath10k/trace.c b/drivers/net/wireless/ath/ath10k/trace.c
new file mode 100644 (file)
index 0000000..4a31e2c
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
new file mode 100644 (file)
index 0000000..85e806b
--- /dev/null
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#if !defined(_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+
+#include <linux/tracepoint.h>
+
+#define _TRACE_H_
+
+/* create empty functions when tracing is disabled */
+#if !defined(CONFIG_ATH10K_TRACING)
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(...)
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(evt_class, name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#endif /* !CONFIG_ATH10K_TRACING */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ath10k
+
+#define ATH10K_MSG_MAX 200
+
+DECLARE_EVENT_CLASS(ath10k_log_event,
+       TP_PROTO(struct va_format *vaf),
+       TP_ARGS(vaf),
+       TP_STRUCT__entry(
+               __dynamic_array(char, msg, ATH10K_MSG_MAX)
+       ),
+       TP_fast_assign(
+               WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+                                      ATH10K_MSG_MAX,
+                                      vaf->fmt,
+                                      *vaf->va) >= ATH10K_MSG_MAX);
+       ),
+       TP_printk("%s", __get_str(msg))
+);
+
+DEFINE_EVENT(ath10k_log_event, ath10k_log_err,
+            TP_PROTO(struct va_format *vaf),
+            TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(ath10k_log_event, ath10k_log_warn,
+            TP_PROTO(struct va_format *vaf),
+            TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(ath10k_log_event, ath10k_log_info,
+            TP_PROTO(struct va_format *vaf),
+            TP_ARGS(vaf)
+);
+
+TRACE_EVENT(ath10k_log_dbg,
+       TP_PROTO(unsigned int level, struct va_format *vaf),
+       TP_ARGS(level, vaf),
+       TP_STRUCT__entry(
+               __field(unsigned int, level)
+               __dynamic_array(char, msg, ATH10K_MSG_MAX)
+       ),
+       TP_fast_assign(
+               __entry->level = level;
+               WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+                                      ATH10K_MSG_MAX,
+                                      vaf->fmt,
+                                      *vaf->va) >= ATH10K_MSG_MAX);
+       ),
+       TP_printk("%s", __get_str(msg))
+);
+
+TRACE_EVENT(ath10k_log_dbg_dump,
+       TP_PROTO(const char *msg, const char *prefix,
+                const void *buf, size_t buf_len),
+
+       TP_ARGS(msg, prefix, buf, buf_len),
+
+       TP_STRUCT__entry(
+               __string(msg, msg)
+               __string(prefix, prefix)
+               __field(size_t, buf_len)
+               __dynamic_array(u8, buf, buf_len)
+       ),
+
+       TP_fast_assign(
+               __assign_str(msg, msg);
+               __assign_str(prefix, prefix);
+               __entry->buf_len = buf_len;
+               memcpy(__get_dynamic_array(buf), buf, buf_len);
+       ),
+
+       TP_printk(
+               "%s/%s\n", __get_str(prefix), __get_str(msg)
+       )
+);
+
+TRACE_EVENT(ath10k_wmi_cmd,
+       TP_PROTO(int id, void *buf, size_t buf_len),
+
+       TP_ARGS(id, buf, buf_len),
+
+       TP_STRUCT__entry(
+               __field(unsigned int, id)
+               __field(size_t, buf_len)
+               __dynamic_array(u8, buf, buf_len)
+       ),
+
+       TP_fast_assign(
+               __entry->id = id;
+               __entry->buf_len = buf_len;
+               memcpy(__get_dynamic_array(buf), buf, buf_len);
+       ),
+
+       TP_printk(
+               "id %d len %zu",
+               __entry->id,
+               __entry->buf_len
+       )
+);
+
+TRACE_EVENT(ath10k_wmi_event,
+       TP_PROTO(int id, void *buf, size_t buf_len),
+
+       TP_ARGS(id, buf, buf_len),
+
+       TP_STRUCT__entry(
+               __field(unsigned int, id)
+               __field(size_t, buf_len)
+               __dynamic_array(u8, buf, buf_len)
+       ),
+
+       TP_fast_assign(
+               __entry->id = id;
+               __entry->buf_len = buf_len;
+               memcpy(__get_dynamic_array(buf), buf, buf_len);
+       ),
+
+       TP_printk(
+               "id %d len %zu",
+               __entry->id,
+               __entry->buf_len
+       )
+);
+
+#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ */
+
+/* we don't want to use include/trace/events */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
new file mode 100644 (file)
index 0000000..68b6fae
--- /dev/null
@@ -0,0 +1,417 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "txrx.h"
+#include "htt.h"
+#include "mac.h"
+#include "debug.h"
+
+static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
+{
+       if (!ATH10K_SKB_CB(skb)->htt.is_offchan)
+               return;
+
+       /* If the original wait_for_completion() timed out before
+        * {data,mgmt}_tx_completed() was called then we could complete
+        * offchan_tx_completed for a different skb. Prevent this by using
+        * offchan_tx_skb. */
+       spin_lock_bh(&ar->data_lock);
+       if (ar->offchan_tx_skb != skb) {
+               ath10k_warn("completed old offchannel frame\n");
+               goto out;
+       }
+
+       complete(&ar->offchan_tx_completed);
+       ar->offchan_tx_skb = NULL; /* just for sanity */
+
+       ath10k_dbg(ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb);
+out:
+       spin_unlock_bh(&ar->data_lock);
+}
+
+void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc)
+{
+       struct device *dev = htt->ar->dev;
+       struct ieee80211_tx_info *info;
+       struct sk_buff *txfrag = ATH10K_SKB_CB(txdesc)->htt.txfrag;
+       struct sk_buff *msdu = ATH10K_SKB_CB(txdesc)->htt.msdu;
+       int ret;
+
+       if (ATH10K_SKB_CB(txdesc)->htt.refcount == 0)
+               return;
+
+       ATH10K_SKB_CB(txdesc)->htt.refcount--;
+
+       if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0)
+               return;
+
+       if (txfrag) {
+               ret = ath10k_skb_unmap(dev, txfrag);
+               if (ret)
+                       ath10k_warn("txfrag unmap failed (%d)\n", ret);
+
+               dev_kfree_skb_any(txfrag);
+       }
+
+       ret = ath10k_skb_unmap(dev, msdu);
+       if (ret)
+               ath10k_warn("data skb unmap failed (%d)\n", ret);
+
+       ath10k_report_offchan_tx(htt->ar, msdu);
+
+       info = IEEE80211_SKB_CB(msdu);
+       memset(&info->status, 0, sizeof(info->status));
+
+       if (ATH10K_SKB_CB(txdesc)->htt.discard) {
+               ieee80211_free_txskb(htt->ar->hw, msdu);
+               goto exit;
+       }
+
+       if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+               info->flags |= IEEE80211_TX_STAT_ACK;
+
+       if (ATH10K_SKB_CB(txdesc)->htt.no_ack)
+               info->flags &= ~IEEE80211_TX_STAT_ACK;
+
+       ieee80211_tx_status(htt->ar->hw, msdu);
+       /* we do not own the msdu anymore */
+
+exit:
+       spin_lock_bh(&htt->tx_lock);
+       htt->pending_tx[ATH10K_SKB_CB(txdesc)->htt.msdu_id] = NULL;
+       ath10k_htt_tx_free_msdu_id(htt, ATH10K_SKB_CB(txdesc)->htt.msdu_id);
+       __ath10k_htt_tx_dec_pending(htt);
+       if (bitmap_empty(htt->used_msdu_ids, htt->max_num_pending_tx))
+               wake_up(&htt->empty_tx_wq);
+       spin_unlock_bh(&htt->tx_lock);
+
+       dev_kfree_skb_any(txdesc);
+}
+
+void ath10k_txrx_tx_completed(struct ath10k_htt *htt,
+                             const struct htt_tx_done *tx_done)
+{
+       struct sk_buff *txdesc;
+
+       ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
+                  tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
+
+       if (tx_done->msdu_id >= htt->max_num_pending_tx) {
+               ath10k_warn("warning: msdu_id %d too big, ignoring\n",
+                           tx_done->msdu_id);
+               return;
+       }
+
+       txdesc = htt->pending_tx[tx_done->msdu_id];
+
+       ATH10K_SKB_CB(txdesc)->htt.discard = tx_done->discard;
+       ATH10K_SKB_CB(txdesc)->htt.no_ack = tx_done->no_ack;
+
+       ath10k_txrx_tx_unref(htt, txdesc);
+}
+
+static const u8 rx_legacy_rate_idx[] = {
+       3,      /* 0x00  - 11Mbps  */
+       2,      /* 0x01  - 5.5Mbps */
+       1,      /* 0x02  - 2Mbps   */
+       0,      /* 0x03  - 1Mbps   */
+       3,      /* 0x04  - 11Mbps  */
+       2,      /* 0x05  - 5.5Mbps */
+       1,      /* 0x06  - 2Mbps   */
+       0,      /* 0x07  - 1Mbps   */
+       10,     /* 0x08  - 48Mbps  */
+       8,      /* 0x09  - 24Mbps  */
+       6,      /* 0x0A  - 12Mbps  */
+       4,      /* 0x0B  - 6Mbps   */
+       11,     /* 0x0C  - 54Mbps  */
+       9,      /* 0x0D  - 36Mbps  */
+       7,      /* 0x0E  - 18Mbps  */
+       5,      /* 0x0F  - 9Mbps   */
+};
+
+static void process_rx_rates(struct ath10k *ar, struct htt_rx_info *info,
+                            enum ieee80211_band band,
+                            struct ieee80211_rx_status *status)
+{
+       u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
+       u8 info0 = info->rate.info0;
+       u32 info1 = info->rate.info1;
+       u32 info2 = info->rate.info2;
+       u8 preamble = 0;
+
+       /* Check if valid fields */
+       if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID))
+               return;
+
+       preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE);
+
+       switch (preamble) {
+       case HTT_RX_LEGACY:
+               cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK;
+               rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE);
+               rate_idx = 0;
+
+               if (rate < 0x08 || rate > 0x0F)
+                       break;
+
+               switch (band) {
+               case IEEE80211_BAND_2GHZ:
+                       if (cck)
+                               rate &= ~BIT(3);
+                       rate_idx = rx_legacy_rate_idx[rate];
+                       break;
+               case IEEE80211_BAND_5GHZ:
+                       rate_idx = rx_legacy_rate_idx[rate];
+                       /* We are using the same rate table that was
+                          registered with HW - ath10k_rates[].  For
+                          5 GHz, skip the CCK rates, hence -4 here. */
+                       rate_idx -= 4;
+                       break;
+               default:
+                       break;
+               }
+
+               status->rate_idx = rate_idx;
+               break;
+       case HTT_RX_HT:
+       case HTT_RX_HT_WITH_TXBF:
+               /* HT-SIG - Table 20-11 in info1 and info2 */
+               mcs = info1 & 0x1F;
+               nss = mcs >> 3;
+               bw = (info1 >> 7) & 1;
+               sgi = (info2 >> 7) & 1;
+
+               status->rate_idx = mcs;
+               status->flag |= RX_FLAG_HT;
+               if (sgi)
+                       status->flag |= RX_FLAG_SHORT_GI;
+               if (bw)
+                       status->flag |= RX_FLAG_40MHZ;
+               break;
+       case HTT_RX_VHT:
+       case HTT_RX_VHT_WITH_TXBF:
+               /* VHT-SIG-A1 in info1, VHT-SIG-A2 in info2
+                  TODO check this */
+               mcs = (info2 >> 4) & 0x0F;
+               nss = (info1 >> 10) & 0x07;
+               bw = info1 & 3;
+               sgi = info2 & 1;
+
+               status->rate_idx = mcs;
+               status->vht_nss = nss;
+
+               if (sgi)
+                       status->flag |= RX_FLAG_SHORT_GI;
+
+               switch (bw) {
+               /* 20MHZ */
+               case 0:
+                       break;
+               /* 40MHZ */
+               case 1:
+                       status->flag |= RX_FLAG_40MHZ;
+                       break;
+               /* 80MHZ */
+               case 2:
+                       status->flag |= RX_FLAG_80MHZ;
+               }
+
+               status->flag |= RX_FLAG_VHT;
+               break;
+       default:
+               break;
+       }
+}
+
+void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info)
+{
+       struct ieee80211_rx_status *status;
+       struct ieee80211_channel *ch;
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)info->skb->data;
+
+       status = IEEE80211_SKB_RXCB(info->skb);
+       memset(status, 0, sizeof(*status));
+
+       if (info->encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
+               status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED |
+                               RX_FLAG_MMIC_STRIPPED;
+               hdr->frame_control = __cpu_to_le16(
+                               __le16_to_cpu(hdr->frame_control) &
+                               ~IEEE80211_FCTL_PROTECTED);
+       }
+
+       if (info->status == HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR)
+               status->flag |= RX_FLAG_MMIC_ERROR;
+
+       if (info->fcs_err)
+               status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+       status->signal = info->signal;
+
+       spin_lock_bh(&ar->data_lock);
+       ch = ar->scan_channel;
+       if (!ch)
+               ch = ar->rx_channel;
+       spin_unlock_bh(&ar->data_lock);
+
+       if (!ch) {
+               ath10k_warn("no channel configured; ignoring frame!\n");
+               dev_kfree_skb_any(info->skb);
+               return;
+       }
+
+       process_rx_rates(ar, info, ch->band, status);
+       status->band = ch->band;
+       status->freq = ch->center_freq;
+
+       ath10k_dbg(ATH10K_DBG_DATA,
+                  "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u\n",
+                  info->skb,
+                  info->skb->len,
+                  status->flag == 0 ? "legacy" : "",
+                  status->flag & RX_FLAG_HT ? "ht" : "",
+                  status->flag & RX_FLAG_VHT ? "vht" : "",
+                  status->flag & RX_FLAG_40MHZ ? "40" : "",
+                  status->flag & RX_FLAG_80MHZ ? "80" : "",
+                  status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
+                  status->rate_idx,
+                  status->vht_nss,
+                  status->freq,
+                  status->band);
+
+       ieee80211_rx(ar->hw, info->skb);
+}
+
+struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
+                                    const u8 *addr)
+{
+       struct ath10k_peer *peer;
+
+       lockdep_assert_held(&ar->data_lock);
+
+       list_for_each_entry(peer, &ar->peers, list) {
+               if (peer->vdev_id != vdev_id)
+                       continue;
+               if (memcmp(peer->addr, addr, ETH_ALEN))
+                       continue;
+
+               return peer;
+       }
+
+       return NULL;
+}
+
+static struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar,
+                                                 int peer_id)
+{
+       struct ath10k_peer *peer;
+
+       lockdep_assert_held(&ar->data_lock);
+
+       list_for_each_entry(peer, &ar->peers, list)
+               if (test_bit(peer_id, peer->peer_ids))
+                       return peer;
+
+       return NULL;
+}
+
+static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
+                                      const u8 *addr, bool expect_mapped)
+{
+       int ret;
+
+       ret = wait_event_timeout(ar->peer_mapping_wq, ({
+                       bool mapped;
+
+                       spin_lock_bh(&ar->data_lock);
+                       mapped = !!ath10k_peer_find(ar, vdev_id, addr);
+                       spin_unlock_bh(&ar->data_lock);
+
+                       mapped == expect_mapped;
+               }), 3*HZ);
+
+       if (ret <= 0)
+               return -ETIMEDOUT;
+
+       return 0;
+}
+
+int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id, const u8 *addr)
+{
+       return ath10k_wait_for_peer_common(ar, vdev_id, addr, true);
+}
+
+int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id, const u8 *addr)
+{
+       return ath10k_wait_for_peer_common(ar, vdev_id, addr, false);
+}
+
+void ath10k_peer_map_event(struct ath10k_htt *htt,
+                          struct htt_peer_map_event *ev)
+{
+       struct ath10k *ar = htt->ar;
+       struct ath10k_peer *peer;
+
+       spin_lock_bh(&ar->data_lock);
+       peer = ath10k_peer_find(ar, ev->vdev_id, ev->addr);
+       if (!peer) {
+               peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
+               if (!peer)
+                       goto exit;
+
+               peer->vdev_id = ev->vdev_id;
+               memcpy(peer->addr, ev->addr, ETH_ALEN);
+               list_add(&peer->list, &ar->peers);
+               wake_up(&ar->peer_mapping_wq);
+       }
+
+       ath10k_dbg(ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n",
+                  ev->vdev_id, ev->addr, ev->peer_id);
+
+       set_bit(ev->peer_id, peer->peer_ids);
+exit:
+       spin_unlock_bh(&ar->data_lock);
+}
+
+void ath10k_peer_unmap_event(struct ath10k_htt *htt,
+                            struct htt_peer_unmap_event *ev)
+{
+       struct ath10k *ar = htt->ar;
+       struct ath10k_peer *peer;
+
+       spin_lock_bh(&ar->data_lock);
+       peer = ath10k_peer_find_by_id(ar, ev->peer_id);
+       if (!peer) {
+               ath10k_warn("unknown peer id %d\n", ev->peer_id);
+               goto exit;
+       }
+
+       ath10k_dbg(ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
+                  peer->vdev_id, peer->addr, ev->peer_id);
+
+       clear_bit(ev->peer_id, peer->peer_ids);
+
+       if (bitmap_empty(peer->peer_ids, ATH10K_MAX_NUM_PEER_IDS)) {
+               list_del(&peer->list);
+               kfree(peer);
+               wake_up(&ar->peer_mapping_wq);
+       }
+
+exit:
+       spin_unlock_bh(&ar->data_lock);
+}
diff --git a/drivers/net/wireless/ath/ath10k/txrx.h b/drivers/net/wireless/ath/ath10k/txrx.h
new file mode 100644 (file)
index 0000000..e78632a
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef _TXRX_H_
+#define _TXRX_H_
+
+#include "htt.h"
+
+void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc);
+void ath10k_txrx_tx_completed(struct ath10k_htt *htt,
+                             const struct htt_tx_done *tx_done);
+void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info);
+
+struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
+                                    const u8 *addr);
+int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id,
+                                const u8 *addr);
+int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id,
+                                const u8 *addr);
+
+void ath10k_peer_map_event(struct ath10k_htt *htt,
+                          struct htt_peer_map_event *ev);
+void ath10k_peer_unmap_event(struct ath10k_htt *htt,
+                            struct htt_peer_unmap_event *ev);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
new file mode 100644 (file)
index 0000000..7d4b798
--- /dev/null
@@ -0,0 +1,2081 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+
+#include "core.h"
+#include "htc.h"
+#include "debug.h"
+#include "wmi.h"
+#include "mac.h"
+
+void ath10k_wmi_flush_tx(struct ath10k *ar)
+{
+       int ret;
+
+       ret = wait_event_timeout(ar->wmi.wq,
+                                atomic_read(&ar->wmi.pending_tx_count) == 0,
+                                5*HZ);
+       if (atomic_read(&ar->wmi.pending_tx_count) == 0)
+               return;
+
+       if (ret == 0)
+               ret = -ETIMEDOUT;
+
+       if (ret < 0)
+               ath10k_warn("wmi flush failed (%d)\n", ret);
+}
+
+int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
+{
+       int ret;
+       ret = wait_for_completion_timeout(&ar->wmi.service_ready,
+                                         WMI_SERVICE_READY_TIMEOUT_HZ);
+       return ret;
+}
+
+int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
+{
+       int ret;
+       ret = wait_for_completion_timeout(&ar->wmi.unified_ready,
+                                         WMI_UNIFIED_READY_TIMEOUT_HZ);
+       return ret;
+}
+
+static struct sk_buff *ath10k_wmi_alloc_skb(u32 len)
+{
+       struct sk_buff *skb;
+       u32 round_len = roundup(len, 4);
+
+       skb = ath10k_htc_alloc_skb(WMI_SKB_HEADROOM + round_len);
+       if (!skb)
+               return NULL;
+
+       skb_reserve(skb, WMI_SKB_HEADROOM);
+       if (!IS_ALIGNED((unsigned long)skb->data, 4))
+               ath10k_warn("Unaligned WMI skb\n");
+
+       skb_put(skb, round_len);
+       memset(skb->data, 0, round_len);
+
+       return skb;
+}
+
+static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
+{
+       dev_kfree_skb(skb);
+
+       if (atomic_sub_return(1, &ar->wmi.pending_tx_count) == 0)
+               wake_up(&ar->wmi.wq);
+}
+
+/* WMI command API */
+static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
+                              enum wmi_cmd_id cmd_id)
+{
+       struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+       struct wmi_cmd_hdr *cmd_hdr;
+       int status;
+       u32 cmd = 0;
+
+       if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
+               return -ENOMEM;
+
+       cmd |= SM(cmd_id, WMI_CMD_HDR_CMD_ID);
+
+       cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+       cmd_hdr->cmd_id = __cpu_to_le32(cmd);
+
+       if (atomic_add_return(1, &ar->wmi.pending_tx_count) >
+           WMI_MAX_PENDING_TX_COUNT) {
+               /* avoid using up memory when FW hangs */
+               atomic_dec(&ar->wmi.pending_tx_count);
+               return -EBUSY;
+       }
+
+       memset(skb_cb, 0, sizeof(*skb_cb));
+
+       trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len);
+
+       status = ath10k_htc_send(ar->htc, ar->wmi.eid, skb);
+       if (status) {
+               dev_kfree_skb_any(skb);
+               atomic_dec(&ar->wmi.pending_tx_count);
+               return status;
+       }
+
+       return 0;
+}
+
+static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
+{
+       struct wmi_scan_event *event = (struct wmi_scan_event *)skb->data;
+       enum wmi_scan_event_type event_type;
+       enum wmi_scan_completion_reason reason;
+       u32 freq;
+       u32 req_id;
+       u32 scan_id;
+       u32 vdev_id;
+
+       event_type = __le32_to_cpu(event->event_type);
+       reason     = __le32_to_cpu(event->reason);
+       freq       = __le32_to_cpu(event->channel_freq);
+       req_id     = __le32_to_cpu(event->scan_req_id);
+       scan_id    = __le32_to_cpu(event->scan_id);
+       vdev_id    = __le32_to_cpu(event->vdev_id);
+
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENTID\n");
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "scan event type %d reason %d freq %d req_id %d "
+                  "scan_id %d vdev_id %d\n",
+                  event_type, reason, freq, req_id, scan_id, vdev_id);
+
+       spin_lock_bh(&ar->data_lock);
+
+       switch (event_type) {
+       case WMI_SCAN_EVENT_STARTED:
+               ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_STARTED\n");
+               if (ar->scan.in_progress && ar->scan.is_roc)
+                       ieee80211_ready_on_channel(ar->hw);
+
+               complete(&ar->scan.started);
+               break;
+       case WMI_SCAN_EVENT_COMPLETED:
+               ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_COMPLETED\n");
+               switch (reason) {
+               case WMI_SCAN_REASON_COMPLETED:
+                       ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_COMPLETED\n");
+                       break;
+               case WMI_SCAN_REASON_CANCELLED:
+                       ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_CANCELED\n");
+                       break;
+               case WMI_SCAN_REASON_PREEMPTED:
+                       ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_PREEMPTED\n");
+                       break;
+               case WMI_SCAN_REASON_TIMEDOUT:
+                       ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_TIMEDOUT\n");
+                       break;
+               default:
+                       break;
+               }
+
+               ar->scan_channel = NULL;
+               if (!ar->scan.in_progress) {
+                       ath10k_warn("no scan requested, ignoring\n");
+                       break;
+               }
+
+               if (ar->scan.is_roc) {
+                       ath10k_offchan_tx_purge(ar);
+
+                       if (!ar->scan.aborting)
+                               ieee80211_remain_on_channel_expired(ar->hw);
+               } else {
+                       ieee80211_scan_completed(ar->hw, ar->scan.aborting);
+               }
+
+               del_timer(&ar->scan.timeout);
+               complete_all(&ar->scan.completed);
+               ar->scan.in_progress = false;
+               break;
+       case WMI_SCAN_EVENT_BSS_CHANNEL:
+               ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_BSS_CHANNEL\n");
+               ar->scan_channel = NULL;
+               break;
+       case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
+               ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_FOREIGN_CHANNEL\n");
+               ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
+               if (ar->scan.in_progress && ar->scan.is_roc &&
+                   ar->scan.roc_freq == freq) {
+                       complete(&ar->scan.on_channel);
+               }
+               break;
+       case WMI_SCAN_EVENT_DEQUEUED:
+               ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_DEQUEUED\n");
+               break;
+       case WMI_SCAN_EVENT_PREEMPTED:
+               ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENT_PREEMPTED\n");
+               break;
+       case WMI_SCAN_EVENT_START_FAILED:
+               ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENT_START_FAILED\n");
+               break;
+       default:
+               break;
+       }
+
+       spin_unlock_bh(&ar->data_lock);
+       return 0;
+}
+
+static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode)
+{
+       enum ieee80211_band band;
+
+       switch (phy_mode) {
+       case MODE_11A:
+       case MODE_11NA_HT20:
+       case MODE_11NA_HT40:
+       case MODE_11AC_VHT20:
+       case MODE_11AC_VHT40:
+       case MODE_11AC_VHT80:
+               band = IEEE80211_BAND_5GHZ;
+               break;
+       case MODE_11G:
+       case MODE_11B:
+       case MODE_11GONLY:
+       case MODE_11NG_HT20:
+       case MODE_11NG_HT40:
+       case MODE_11AC_VHT20_2G:
+       case MODE_11AC_VHT40_2G:
+       case MODE_11AC_VHT80_2G:
+       default:
+               band = IEEE80211_BAND_2GHZ;
+       }
+
+       return band;
+}
+
+static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band)
+{
+       u8 rate_idx = 0;
+
+       /* rate in Kbps */
+       switch (rate) {
+       case 1000:
+               rate_idx = 0;
+               break;
+       case 2000:
+               rate_idx = 1;
+               break;
+       case 5500:
+               rate_idx = 2;
+               break;
+       case 11000:
+               rate_idx = 3;
+               break;
+       case 6000:
+               rate_idx = 4;
+               break;
+       case 9000:
+               rate_idx = 5;
+               break;
+       case 12000:
+               rate_idx = 6;
+               break;
+       case 18000:
+               rate_idx = 7;
+               break;
+       case 24000:
+               rate_idx = 8;
+               break;
+       case 36000:
+               rate_idx = 9;
+               break;
+       case 48000:
+               rate_idx = 10;
+               break;
+       case 54000:
+               rate_idx = 11;
+               break;
+       default:
+               break;
+       }
+
+       if (band == IEEE80211_BAND_5GHZ) {
+               if (rate_idx > 3)
+                       /* Omit CCK rates */
+                       rate_idx -= 4;
+               else
+                       rate_idx = 0;
+       }
+
+       return rate_idx;
+}
+
+static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+       struct wmi_mgmt_rx_event *event = (struct wmi_mgmt_rx_event *)skb->data;
+       struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+       struct ieee80211_hdr *hdr;
+       u32 rx_status;
+       u32 channel;
+       u32 phy_mode;
+       u32 snr;
+       u32 rate;
+       u32 buf_len;
+       u16 fc;
+
+       channel   = __le32_to_cpu(event->hdr.channel);
+       buf_len   = __le32_to_cpu(event->hdr.buf_len);
+       rx_status = __le32_to_cpu(event->hdr.status);
+       snr       = __le32_to_cpu(event->hdr.snr);
+       phy_mode  = __le32_to_cpu(event->hdr.phy_mode);
+       rate      = __le32_to_cpu(event->hdr.rate);
+
+       memset(status, 0, sizeof(*status));
+
+       ath10k_dbg(ATH10K_DBG_MGMT,
+                  "event mgmt rx status %08x\n", rx_status);
+
+       if (rx_status & WMI_RX_STATUS_ERR_DECRYPT) {
+               dev_kfree_skb(skb);
+               return 0;
+       }
+
+       if (rx_status & WMI_RX_STATUS_ERR_KEY_CACHE_MISS) {
+               dev_kfree_skb(skb);
+               return 0;
+       }
+
+       if (rx_status & WMI_RX_STATUS_ERR_CRC)
+               status->flag |= RX_FLAG_FAILED_FCS_CRC;
+       if (rx_status & WMI_RX_STATUS_ERR_MIC)
+               status->flag |= RX_FLAG_MMIC_ERROR;
+
+       status->band = phy_mode_to_band(phy_mode);
+       status->freq = ieee80211_channel_to_frequency(channel, status->band);
+       status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
+       status->rate_idx = get_rate_idx(rate, status->band);
+
+       skb_pull(skb, sizeof(event->hdr));
+
+       hdr = (struct ieee80211_hdr *)skb->data;
+       fc = le16_to_cpu(hdr->frame_control);
+
+       if (fc & IEEE80211_FCTL_PROTECTED) {
+               status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED |
+                               RX_FLAG_MMIC_STRIPPED;
+               hdr->frame_control = __cpu_to_le16(fc &
+                                       ~IEEE80211_FCTL_PROTECTED);
+       }
+
+       ath10k_dbg(ATH10K_DBG_MGMT,
+                  "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
+                  skb, skb->len,
+                  fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
+
+       ath10k_dbg(ATH10K_DBG_MGMT,
+                  "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
+                  status->freq, status->band, status->signal,
+                  status->rate_idx);
+
+       /*
+        * Packets from HTC arrive aligned to 4-byte boundaries because
+        * they may originally have been bundled with a trailer.
+        */
+       skb_trim(skb, buf_len);
+
+       ieee80211_rx(ar->hw, skb);
+       return 0;
+}
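
The signal value handed to mac80211 above is simply the firmware-reported SNR offset by the driver's default noise floor; a rough worked example (assuming the usual -95 dBm ATH10K_DEFAULT_NOISE_FLOOR from core.h, SNR value hypothetical):

	signal = snr + ATH10K_DEFAULT_NOISE_FLOOR
	       = 30 dB + (-95 dBm)
	       = -65 dBm reported in status->signal
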
+
+static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_CHAN_INFO_EVENTID\n");
+}
+
+static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");
+}
+
+static void ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_DEBUG_MESG_EVENTID\n");
+}
+
+static void ath10k_wmi_event_update_stats(struct ath10k *ar,
+                                         struct sk_buff *skb)
+{
+       struct wmi_stats_event *ev = (struct wmi_stats_event *)skb->data;
+
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");
+
+       ath10k_debug_read_target_stats(ar, ev);
+}
+
+static void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar,
+                                            struct sk_buff *skb)
+{
+       struct wmi_vdev_start_response_event *ev;
+
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");
+
+       ev = (struct wmi_vdev_start_response_event *)skb->data;
+
+       if (WARN_ON(__le32_to_cpu(ev->status)))
+               return;
+
+       complete(&ar->vdev_setup_done);
+}
+
+static void ath10k_wmi_event_vdev_stopped(struct ath10k *ar,
+                                         struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n");
+       complete(&ar->vdev_setup_done);
+}
+
+static void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar,
+                                             struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_PEER_STA_KICKOUT_EVENTID\n");
+}
+
+/*
+ * FIXME
+ *
+ * We don't report the sleep state of connected stations to
+ * mac80211, so mac80211 can't fill in the TIM IE correctly.
+ *
+ * There is no known way of getting nullfunc frames that carry
+ * the sleep transition of connected stations - these do not
+ * seem to be forwarded from the target to the host, and there
+ * doesn't seem to be a dedicated event for them either. The
+ * only option left would be to read tim_bitmap during SWBA.
+ *
+ * We could probably use the tim_bitmap from SWBA to tell
+ * mac80211 which stations are asleep and which are not, but
+ * calling mac80211 functions that many times could take too
+ * long and make us miss the deadline for submitting the
+ * beacon to the target.
+ *
+ * So, as a workaround, if unicast traffic is buffered for
+ * stations with an AID > 7, we extend the TIM IE and fill it
+ * in ourselves.
+ */
+static void ath10k_wmi_update_tim(struct ath10k *ar,
+                                 struct ath10k_vif *arvif,
+                                 struct sk_buff *bcn,
+                                 struct wmi_bcn_info *bcn_info)
+{
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
+       struct ieee80211_tim_ie *tim;
+       u8 *ies, *ie;
+       u8 ie_len, pvm_len;
+
+       /* if the next SWBA has tim_changed unset, its tim_bitmap is garbage,
+        * so we must copy the bitmap when it changes and reuse it later */
+       if (__le32_to_cpu(bcn_info->tim_info.tim_changed)) {
+               int i;
+
+               BUILD_BUG_ON(sizeof(arvif->u.ap.tim_bitmap) !=
+                            sizeof(bcn_info->tim_info.tim_bitmap));
+
+               for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) {
+                       __le32 t = bcn_info->tim_info.tim_bitmap[i / 4];
+                       u32 v = __le32_to_cpu(t);
+                       arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
+               }
+
+               /* FW reports a length of either 0 or 16,
+                * so we calculate it ourselves */
+               arvif->u.ap.tim_len = 0;
+               for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++)
+                       if (arvif->u.ap.tim_bitmap[i])
+                               arvif->u.ap.tim_len = i;
+
+               arvif->u.ap.tim_len++;
+       }
+
+       ies = bcn->data;
+       ies += ieee80211_hdrlen(hdr->frame_control);
+       ies += 12; /* fixed parameters */
+
+       ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies,
+                                   (u8 *)skb_tail_pointer(bcn) - ies);
+       if (!ie) {
+               /* highly unlikely for mac80211 */
+               ath10k_warn("no tim ie found\n");
+               return;
+       }
+
+       tim = (void *)ie + 2;
+       ie_len = ie[1];
+       pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */
+
+       if (pvm_len < arvif->u.ap.tim_len) {
+               int expand_size = sizeof(arvif->u.ap.tim_bitmap) - pvm_len;
+               int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len);
+               void *next_ie = ie + 2 + ie_len;
+
+               if (skb_put(bcn, expand_size)) {
+                       memmove(next_ie + expand_size, next_ie, move_size);
+
+                       ie[1] += expand_size;
+                       ie_len += expand_size;
+                       pvm_len += expand_size;
+               } else {
+                       ath10k_warn("tim expansion failed\n");
+               }
+       }
+
+       if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) {
+               ath10k_warn("tim pvm length is too great (%d)\n", pvm_len);
+               return;
+       }
+
+       tim->bitmap_ctrl = !!__le32_to_cpu(bcn_info->tim_info.tim_mcast);
+       memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);
+
+       ath10k_dbg(ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
+                  tim->dtim_count, tim->dtim_period,
+                  tim->bitmap_ctrl, pvm_len);
+}
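
A worked example of the expansion path above, with hypothetical values: suppose unicast traffic is buffered only for a station with AID 9, and mac80211 handed us a minimal TIM IE (length 4, i.e. pvm_len = 1):

	AID 9          ->  bitmap octet 9 / 8 = 1, bit 9 % 8 = 1
	tim_bitmap[1]  !=  0, so tim_len = 1 + 1 = 2
	pvm_len = 1    <   tim_len = 2
	               ->  skb_put(bcn, sizeof(tim_bitmap) - pvm_len) grows the
	                   beacon, memmove() shifts the trailing IEs up, and the
	                   copied bitmap is then written into tim->virtual_map
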
+
+static void ath10k_p2p_fill_noa_ie(u8 *data, u32 len,
+                                  struct wmi_p2p_noa_info *noa)
+{
+       struct ieee80211_p2p_noa_attr *noa_attr;
+       u8  ctwindow_oppps = noa->ctwindow_oppps;
+       u8 ctwindow = ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET;
+       bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT);
+       __le16 *noa_attr_len;
+       u16 attr_len;
+       u8 noa_descriptors = noa->num_descriptors;
+       int i;
+
+       /* P2P IE */
+       data[0] = WLAN_EID_VENDOR_SPECIFIC;
+       data[1] = len - 2;
+       data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
+       data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
+       data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
+       data[5] = WLAN_OUI_TYPE_WFA_P2P;
+
+       /* NOA ATTR */
+       data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
+       noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
+       noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];
+
+       noa_attr->index = noa->index;
+       noa_attr->oppps_ctwindow = ctwindow;
+       if (oppps)
+               noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;
+
+       for (i = 0; i < noa_descriptors; i++) {
+               noa_attr->desc[i].count =
+                       __le32_to_cpu(noa->descriptors[i].type_count);
+               noa_attr->desc[i].duration = noa->descriptors[i].duration;
+               noa_attr->desc[i].interval = noa->descriptors[i].interval;
+               noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
+       }
+
+       attr_len = 2; /* index + oppps_ctwindow */
+       attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
+       *noa_attr_len = __cpu_to_le16(attr_len);
+}
+
+static u32 ath10k_p2p_calc_noa_ie_len(struct wmi_p2p_noa_info *noa)
+{
+       u32 len = 0;
+       u8 noa_descriptors = noa->num_descriptors;
+       u8 opp_ps_info = noa->ctwindow_oppps;
+       bool opps_enabled = !!(opp_ps_info & WMI_P2P_OPPPS_ENABLE_BIT);
+
+       if (!noa_descriptors && !opps_enabled)
+               return len;
+
+       len += 1 + 1 + 4; /* EID + len + OUI */
+       len += 1 + 2; /* noa attr  + attr len */
+       len += 1 + 1; /* index + oppps_ctwindow */
+       len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
+
+       return len;
+}
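
For illustration, with one NoA descriptor (and assuming the usual 13-byte packed struct ieee80211_p2p_noa_desc) the length computed above works out to:

	len = (1 + 1 + 4)   /* EID + length + WFA OUI and type  */
	    + (1 + 2)       /* attribute id + attribute length  */
	    + (1 + 1)       /* index + oppps_ctwindow           */
	    + 1 * 13        /* one NoA descriptor               */
	    = 24 bytes

ath10k_p2p_fill_noa_ie() then stores len - 2 = 22 in data[1] and an attribute length of 2 + 13 = 15 in the two bytes at data[7].
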
+
+static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
+                                 struct sk_buff *bcn,
+                                 struct wmi_bcn_info *bcn_info)
+{
+       struct wmi_p2p_noa_info *noa = &bcn_info->p2p_noa_info;
+       u8 *new_data, *old_data = arvif->u.ap.noa_data;
+       u32 new_len;
+
+       if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
+               return;
+
+       ath10k_dbg(ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
+       if (noa->changed & WMI_P2P_NOA_CHANGED_BIT) {
+               new_len = ath10k_p2p_calc_noa_ie_len(noa);
+               if (!new_len)
+                       goto cleanup;
+
+               new_data = kmalloc(new_len, GFP_ATOMIC);
+               if (!new_data)
+                       goto cleanup;
+
+               ath10k_p2p_fill_noa_ie(new_data, new_len, noa);
+
+               spin_lock_bh(&ar->data_lock);
+               arvif->u.ap.noa_data = new_data;
+               arvif->u.ap.noa_len = new_len;
+               spin_unlock_bh(&ar->data_lock);
+               kfree(old_data);
+       }
+
+       if (arvif->u.ap.noa_data)
+               if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC))
+                       memcpy(skb_put(bcn, arvif->u.ap.noa_len),
+                              arvif->u.ap.noa_data,
+                              arvif->u.ap.noa_len);
+       return;
+
+cleanup:
+       spin_lock_bh(&ar->data_lock);
+       arvif->u.ap.noa_data = NULL;
+       arvif->u.ap.noa_len = 0;
+       spin_unlock_bh(&ar->data_lock);
+       kfree(old_data);
+}
+
+static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
+{
+       struct wmi_host_swba_event *ev;
+       u32 map;
+       int i = -1;
+       struct wmi_bcn_info *bcn_info;
+       struct ath10k_vif *arvif;
+       struct wmi_bcn_tx_arg arg;
+       struct sk_buff *bcn;
+       int vdev_id = 0;
+       int ret;
+
+       ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n");
+
+       ev = (struct wmi_host_swba_event *)skb->data;
+       map = __le32_to_cpu(ev->vdev_map);
+
+       ath10k_dbg(ATH10K_DBG_MGMT, "host swba:\n"
+                  "-vdev map 0x%x\n",
+                  map);
+
+       for (; map; map >>= 1, vdev_id++) {
+               if (!(map & 0x1))
+                       continue;
+
+               i++;
+
+               if (i >= WMI_MAX_AP_VDEV) {
+                       ath10k_warn("swba has corrupted vdev map\n");
+                       break;
+               }
+
+               bcn_info = &ev->bcn_info[i];
+
+               ath10k_dbg(ATH10K_DBG_MGMT,
+                          "-bcn_info[%d]:\n"
+                          "--tim_len %d\n"
+                          "--tim_mcast %d\n"
+                          "--tim_changed %d\n"
+                          "--tim_num_ps_pending %d\n"
+                          "--tim_bitmap 0x%08x%08x%08x%08x\n",
+                          i,
+                          __le32_to_cpu(bcn_info->tim_info.tim_len),
+                          __le32_to_cpu(bcn_info->tim_info.tim_mcast),
+                          __le32_to_cpu(bcn_info->tim_info.tim_changed),
+                          __le32_to_cpu(bcn_info->tim_info.tim_num_ps_pending),
+                          __le32_to_cpu(bcn_info->tim_info.tim_bitmap[3]),
+                          __le32_to_cpu(bcn_info->tim_info.tim_bitmap[2]),
+                          __le32_to_cpu(bcn_info->tim_info.tim_bitmap[1]),
+                          __le32_to_cpu(bcn_info->tim_info.tim_bitmap[0]));
+
+               arvif = ath10k_get_arvif(ar, vdev_id);
+               if (arvif == NULL) {
+                       ath10k_warn("no vif for vdev_id %d found\n", vdev_id);
+                       continue;
+               }
+
+               bcn = ieee80211_beacon_get(ar->hw, arvif->vif);
+               if (!bcn) {
+                       ath10k_warn("could not get mac80211 beacon\n");
+                       continue;
+               }
+
+               ath10k_tx_h_seq_no(bcn);
+               ath10k_wmi_update_tim(ar, arvif, bcn, bcn_info);
+               ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info);
+
+               arg.vdev_id = arvif->vdev_id;
+               arg.tx_rate = 0;
+               arg.tx_power = 0;
+               arg.bcn = bcn->data;
+               arg.bcn_len = bcn->len;
+
+               ret = ath10k_wmi_beacon_send(ar, &arg);
+               if (ret)
+                       ath10k_warn("could not send beacon (%d)\n", ret);
+
+               dev_kfree_skb_any(bcn);
+       }
+}
+
+static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar,
+                                              struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
+}
+
+static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_PHYERR_EVENTID\n");
+}
+
+static void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n");
+}
+
+static void ath10k_wmi_event_profile_match(struct ath10k *ar,
+                                   struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n");
+}
+
+static void ath10k_wmi_event_debug_print(struct ath10k *ar,
+                                 struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_DEBUG_PRINT_EVENTID\n");
+}
+
+static void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n");
+}
+
+static void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar,
+                                              struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n");
+}
+
+static void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
+                                            struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n");
+}
+
+static void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
+                                            struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n");
+}
+
+static void ath10k_wmi_event_rtt_error_report(struct ath10k *ar,
+                                             struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n");
+}
+
+static void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar,
+                                            struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n");
+}
+
+static void ath10k_wmi_event_dcs_interference(struct ath10k *ar,
+                                             struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
+}
+
+static void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar,
+                                            struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n");
+}
+
+static void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar,
+                                          struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
+}
+
+static void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar,
+                                        struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n");
+}
+
+static void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar,
+                                           struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n");
+}
+
+static void ath10k_wmi_event_delba_complete(struct ath10k *ar,
+                                           struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n");
+}
+
+static void ath10k_wmi_event_addba_complete(struct ath10k *ar,
+                                           struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n");
+}
+
+static void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
+                                               struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
+}
+
+static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
+                                             struct sk_buff *skb)
+{
+       struct wmi_service_ready_event *ev = (void *)skb->data;
+
+       if (skb->len < sizeof(*ev)) {
+               ath10k_warn("Service ready event was %d B but expected %zu B. Wrong firmware version?\n",
+                           skb->len, sizeof(*ev));
+               return;
+       }
+
+       ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power);
+       ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power);
+       ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info);
+       ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info);
+       ar->fw_version_major =
+               (__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24;
+       ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff);
+       ar->fw_version_release =
+               (__le32_to_cpu(ev->sw_version_1) & 0xffff0000) >> 16;
+       ar->fw_version_build = (__le32_to_cpu(ev->sw_version_1) & 0x0000ffff);
+       ar->phy_capability = __le32_to_cpu(ev->phy_capability);
+
+       ar->ath_common.regulatory.current_rd =
+               __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd);
+
+       ath10k_debug_read_service_map(ar, ev->wmi_service_bitmap,
+                                     sizeof(ev->wmi_service_bitmap));
+
+       if (strlen(ar->hw->wiphy->fw_version) == 0) {
+               snprintf(ar->hw->wiphy->fw_version,
+                        sizeof(ar->hw->wiphy->fw_version),
+                        "%u.%u.%u.%u",
+                        ar->fw_version_major,
+                        ar->fw_version_minor,
+                        ar->fw_version_release,
+                        ar->fw_version_build);
+       }
+
+       /* FIXME: we should probably support this */
+       if (__le32_to_cpu(ev->num_mem_reqs) > 0) {
+               ath10k_warn("target requested %d memory chunks; ignoring\n",
+                           __le32_to_cpu(ev->num_mem_reqs));
+       }
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u\n",
+                  __le32_to_cpu(ev->sw_version),
+                  __le32_to_cpu(ev->sw_version_1),
+                  __le32_to_cpu(ev->abi_version),
+                  __le32_to_cpu(ev->phy_capability),
+                  __le32_to_cpu(ev->ht_cap_info),
+                  __le32_to_cpu(ev->vht_cap_info),
+                  __le32_to_cpu(ev->vht_supp_mcs),
+                  __le32_to_cpu(ev->sys_cap_info),
+                  __le32_to_cpu(ev->num_mem_reqs));
+
+       complete(&ar->wmi.service_ready);
+}
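
The firmware version fields are plain bit-slices of the two 32-bit words; with hypothetical values, the unpacking above gives:

	sw_version   = 0x03000499  ->  major   = 0x03     = 3
	                               minor   = 0x000499 = 1177
	sw_version_1 = 0x00020036  ->  release = 0x0002   = 2
	                               build   = 0x0036   = 54
	wiphy fw_version string: "3.1177.2.54"
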
+
+static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+       struct wmi_ready_event *ev = (struct wmi_ready_event *)skb->data;
+
+       if (WARN_ON(skb->len < sizeof(*ev)))
+               return -EINVAL;
+
+       memcpy(ar->mac_addr, ev->mac_addr.addr, ETH_ALEN);
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d\n",
+                  __le32_to_cpu(ev->sw_version),
+                  __le32_to_cpu(ev->abi_version),
+                  ev->mac_addr.addr,
+                  __le32_to_cpu(ev->status));
+
+       complete(&ar->wmi.unified_ready);
+       return 0;
+}
+
+static void ath10k_wmi_event_process(struct ath10k *ar, struct sk_buff *skb)
+{
+       struct wmi_cmd_hdr *cmd_hdr;
+       enum wmi_event_id id;
+       u16 len;
+
+       cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+       id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+
+       if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
+               return;
+
+       len = skb->len;
+
+       trace_ath10k_wmi_event(id, skb->data, skb->len);
+
+       switch (id) {
+       case WMI_MGMT_RX_EVENTID:
+               ath10k_wmi_event_mgmt_rx(ar, skb);
+               /* mgmt_rx() owns the skb now! */
+               return;
+       case WMI_SCAN_EVENTID:
+               ath10k_wmi_event_scan(ar, skb);
+               break;
+       case WMI_CHAN_INFO_EVENTID:
+               ath10k_wmi_event_chan_info(ar, skb);
+               break;
+       case WMI_ECHO_EVENTID:
+               ath10k_wmi_event_echo(ar, skb);
+               break;
+       case WMI_DEBUG_MESG_EVENTID:
+               ath10k_wmi_event_debug_mesg(ar, skb);
+               break;
+       case WMI_UPDATE_STATS_EVENTID:
+               ath10k_wmi_event_update_stats(ar, skb);
+               break;
+       case WMI_VDEV_START_RESP_EVENTID:
+               ath10k_wmi_event_vdev_start_resp(ar, skb);
+               break;
+       case WMI_VDEV_STOPPED_EVENTID:
+               ath10k_wmi_event_vdev_stopped(ar, skb);
+               break;
+       case WMI_PEER_STA_KICKOUT_EVENTID:
+               ath10k_wmi_event_peer_sta_kickout(ar, skb);
+               break;
+       case WMI_HOST_SWBA_EVENTID:
+               ath10k_wmi_event_host_swba(ar, skb);
+               break;
+       case WMI_TBTTOFFSET_UPDATE_EVENTID:
+               ath10k_wmi_event_tbttoffset_update(ar, skb);
+               break;
+       case WMI_PHYERR_EVENTID:
+               ath10k_wmi_event_phyerr(ar, skb);
+               break;
+       case WMI_ROAM_EVENTID:
+               ath10k_wmi_event_roam(ar, skb);
+               break;
+       case WMI_PROFILE_MATCH:
+               ath10k_wmi_event_profile_match(ar, skb);
+               break;
+       case WMI_DEBUG_PRINT_EVENTID:
+               ath10k_wmi_event_debug_print(ar, skb);
+               break;
+       case WMI_PDEV_QVIT_EVENTID:
+               ath10k_wmi_event_pdev_qvit(ar, skb);
+               break;
+       case WMI_WLAN_PROFILE_DATA_EVENTID:
+               ath10k_wmi_event_wlan_profile_data(ar, skb);
+               break;
+       case WMI_RTT_MEASUREMENT_REPORT_EVENTID:
+               ath10k_wmi_event_rtt_measurement_report(ar, skb);
+               break;
+       case WMI_TSF_MEASUREMENT_REPORT_EVENTID:
+               ath10k_wmi_event_tsf_measurement_report(ar, skb);
+               break;
+       case WMI_RTT_ERROR_REPORT_EVENTID:
+               ath10k_wmi_event_rtt_error_report(ar, skb);
+               break;
+       case WMI_WOW_WAKEUP_HOST_EVENTID:
+               ath10k_wmi_event_wow_wakeup_host(ar, skb);
+               break;
+       case WMI_DCS_INTERFERENCE_EVENTID:
+               ath10k_wmi_event_dcs_interference(ar, skb);
+               break;
+       case WMI_PDEV_TPC_CONFIG_EVENTID:
+               ath10k_wmi_event_pdev_tpc_config(ar, skb);
+               break;
+       case WMI_PDEV_FTM_INTG_EVENTID:
+               ath10k_wmi_event_pdev_ftm_intg(ar, skb);
+               break;
+       case WMI_GTK_OFFLOAD_STATUS_EVENTID:
+               ath10k_wmi_event_gtk_offload_status(ar, skb);
+               break;
+       case WMI_GTK_REKEY_FAIL_EVENTID:
+               ath10k_wmi_event_gtk_rekey_fail(ar, skb);
+               break;
+       case WMI_TX_DELBA_COMPLETE_EVENTID:
+               ath10k_wmi_event_delba_complete(ar, skb);
+               break;
+       case WMI_TX_ADDBA_COMPLETE_EVENTID:
+               ath10k_wmi_event_addba_complete(ar, skb);
+               break;
+       case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
+               ath10k_wmi_event_vdev_install_key_complete(ar, skb);
+               break;
+       case WMI_SERVICE_READY_EVENTID:
+               ath10k_wmi_service_ready_event_rx(ar, skb);
+               break;
+       case WMI_READY_EVENTID:
+               ath10k_wmi_ready_event_rx(ar, skb);
+               break;
+       default:
+               ath10k_warn("Unknown eventid: %d\n", id);
+               break;
+       }
+
+       dev_kfree_skb(skb);
+}
+
+static void ath10k_wmi_event_work(struct work_struct *work)
+{
+       struct ath10k *ar = container_of(work, struct ath10k,
+                                        wmi.wmi_event_work);
+       struct sk_buff *skb;
+
+       for (;;) {
+               skb = skb_dequeue(&ar->wmi.wmi_event_list);
+               if (!skb)
+                       break;
+
+               ath10k_wmi_event_process(ar, skb);
+       }
+}
+
+static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+       struct wmi_cmd_hdr *cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+       enum wmi_event_id event_id;
+
+       event_id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+
+       /* some events must be handled ASAP
+        * and thus can't be deferred to a worker thread */
+       switch (event_id) {
+       case WMI_HOST_SWBA_EVENTID:
+       case WMI_MGMT_RX_EVENTID:
+               ath10k_wmi_event_process(ar, skb);
+               return;
+       default:
+               break;
+       }
+
+       skb_queue_tail(&ar->wmi.wmi_event_list, skb);
+       queue_work(ar->workqueue, &ar->wmi.wmi_event_work);
+}
+
+/* WMI Initialization functions */
+int ath10k_wmi_attach(struct ath10k *ar)
+{
+       init_completion(&ar->wmi.service_ready);
+       init_completion(&ar->wmi.unified_ready);
+       init_waitqueue_head(&ar->wmi.wq);
+
+       skb_queue_head_init(&ar->wmi.wmi_event_list);
+       INIT_WORK(&ar->wmi.wmi_event_work, ath10k_wmi_event_work);
+
+       return 0;
+}
+
+void ath10k_wmi_detach(struct ath10k *ar)
+{
+       /* HTC should've drained the packets already */
+       if (WARN_ON(atomic_read(&ar->wmi.pending_tx_count) > 0))
+               ath10k_warn("there are still pending packets\n");
+
+       cancel_work_sync(&ar->wmi.wmi_event_work);
+       skb_queue_purge(&ar->wmi.wmi_event_list);
+}
+
+int ath10k_wmi_connect_htc_service(struct ath10k *ar)
+{
+       int status;
+       struct ath10k_htc_svc_conn_req conn_req;
+       struct ath10k_htc_svc_conn_resp conn_resp;
+
+       memset(&conn_req, 0, sizeof(conn_req));
+       memset(&conn_resp, 0, sizeof(conn_resp));
+
+       /* these fields are the same for all service endpoints */
+       conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete;
+       conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx;
+
+       /* connect to control service */
+       conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
+
+       status = ath10k_htc_connect_service(ar->htc, &conn_req, &conn_resp);
+       if (status) {
+               ath10k_warn("failed to connect to WMI CONTROL service status: %d\n",
+                           status);
+               return status;
+       }
+
+       ar->wmi.eid = conn_resp.eid;
+       return 0;
+}
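
Taken together with the attach and wait helpers earlier in this file, the intended bring-up order appears to be the following; this is only a sketch inferred from the completions each function uses (the real call sites live outside this hunk), with error handling elided:

	ath10k_wmi_attach(ar);                  /* init completions, waitqueue, event worker */
	ath10k_wmi_connect_htc_service(ar);     /* bind WMI to its HTC control endpoint */
	ath10k_wmi_wait_for_service_ready(ar);  /* wait for WMI_SERVICE_READY_EVENTID */
	ath10k_wmi_cmd_init(ar);                /* push wmi_resource_config to the target */
	ath10k_wmi_wait_for_unified_ready(ar);  /* wait for WMI_READY_EVENTID */
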
+
+int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
+                                 u16 rd5g, u16 ctl2g, u16 ctl5g)
+{
+       struct wmi_pdev_set_regdomain_cmd *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
+       cmd->reg_domain = __cpu_to_le32(rd);
+       cmd->reg_domain_2G = __cpu_to_le32(rd2g);
+       cmd->reg_domain_5G = __cpu_to_le32(rd5g);
+       cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
+       cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
+                  rd, rd2g, rd5g, ctl2g, ctl5g);
+
+       return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
+}
+
+int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
+                               const struct wmi_channel_arg *arg)
+{
+       struct wmi_set_channel_cmd *cmd;
+       struct sk_buff *skb;
+
+       if (arg->passive)
+               return -EINVAL;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_set_channel_cmd *)skb->data;
+       cmd->chan.mhz               = __cpu_to_le32(arg->freq);
+       cmd->chan.band_center_freq1 = __cpu_to_le32(arg->freq);
+       cmd->chan.mode              = arg->mode;
+       cmd->chan.min_power         = arg->min_power;
+       cmd->chan.max_power         = arg->max_power;
+       cmd->chan.reg_power         = arg->max_reg_power;
+       cmd->chan.reg_classid       = arg->reg_class_id;
+       cmd->chan.antenna_max       = arg->max_antenna_gain;
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "wmi set channel mode %d freq %d\n",
+                  arg->mode, arg->freq);
+
+       return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_CHANNEL_CMDID);
+}
+
+int ath10k_wmi_pdev_suspend_target(struct ath10k *ar)
+{
+       struct wmi_pdev_suspend_cmd *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
+       cmd->suspend_opt = WMI_PDEV_SUSPEND;
+
+       return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SUSPEND_CMDID);
+}
+
+int ath10k_wmi_pdev_resume_target(struct ath10k *ar)
+{
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(0);
+       if (skb == NULL)
+               return -ENOMEM;
+
+       return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_RESUME_CMDID);
+}
+
+int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id,
+                             u32 value)
+{
+       struct wmi_pdev_set_param_cmd *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
+       cmd->param_id    = __cpu_to_le32(id);
+       cmd->param_value = __cpu_to_le32(value);
+
+       ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
+                  id, value);
+       return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_PARAM_CMDID);
+}
+
+int ath10k_wmi_cmd_init(struct ath10k *ar)
+{
+       struct wmi_init_cmd *cmd;
+       struct sk_buff *buf;
+       struct wmi_resource_config config = {};
+       u32 val;
+
+       config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
+       config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS + TARGET_NUM_VDEVS);
+       config.num_offload_peers = __cpu_to_le32(TARGET_NUM_OFFLOAD_PEERS);
+
+       config.num_offload_reorder_bufs =
+               __cpu_to_le32(TARGET_NUM_OFFLOAD_REORDER_BUFS);
+
+       config.num_peer_keys = __cpu_to_le32(TARGET_NUM_PEER_KEYS);
+       config.num_tids = __cpu_to_le32(TARGET_NUM_TIDS);
+       config.ast_skid_limit = __cpu_to_le32(TARGET_AST_SKID_LIMIT);
+       config.tx_chain_mask = __cpu_to_le32(TARGET_TX_CHAIN_MASK);
+       config.rx_chain_mask = __cpu_to_le32(TARGET_RX_CHAIN_MASK);
+       config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
+       config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
+       config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
+       config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI);
+       config.rx_decap_mode = __cpu_to_le32(TARGET_RX_DECAP_MODE);
+
+       config.scan_max_pending_reqs =
+               __cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS);
+
+       config.bmiss_offload_max_vdev =
+               __cpu_to_le32(TARGET_BMISS_OFFLOAD_MAX_VDEV);
+
+       config.roam_offload_max_vdev =
+               __cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_VDEV);
+
+       config.roam_offload_max_ap_profiles =
+               __cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES);
+
+       config.num_mcast_groups = __cpu_to_le32(TARGET_NUM_MCAST_GROUPS);
+       config.num_mcast_table_elems =
+               __cpu_to_le32(TARGET_NUM_MCAST_TABLE_ELEMS);
+
+       config.mcast2ucast_mode = __cpu_to_le32(TARGET_MCAST2UCAST_MODE);
+       config.tx_dbg_log_size = __cpu_to_le32(TARGET_TX_DBG_LOG_SIZE);
+       config.num_wds_entries = __cpu_to_le32(TARGET_NUM_WDS_ENTRIES);
+       config.dma_burst_size = __cpu_to_le32(TARGET_DMA_BURST_SIZE);
+       config.mac_aggr_delim = __cpu_to_le32(TARGET_MAC_AGGR_DELIM);
+
+       val = TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
+       config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
+
+       config.vow_config = __cpu_to_le32(TARGET_VOW_CONFIG);
+
+       config.gtk_offload_max_vdev =
+               __cpu_to_le32(TARGET_GTK_OFFLOAD_MAX_VDEV);
+
+       config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC);
+       config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES);
+
+       buf = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!buf)
+               return -ENOMEM;
+
+       cmd = (struct wmi_init_cmd *)buf->data;
+       cmd->num_host_mem_chunks = 0;
+       memcpy(&cmd->resource_config, &config, sizeof(config));
+
+       ath10k_dbg(ATH10K_DBG_WMI, "wmi init\n");
+       return ath10k_wmi_cmd_send(ar, buf, WMI_INIT_CMDID);
+}
+
+static int ath10k_wmi_start_scan_calc_len(const struct wmi_start_scan_arg *arg)
+{
+       int len;
+
+       len = sizeof(struct wmi_start_scan_cmd);
+
+       if (arg->ie_len) {
+               if (!arg->ie)
+                       return -EINVAL;
+               if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
+                       return -EINVAL;
+
+               len += sizeof(struct wmi_ie_data);
+               len += roundup(arg->ie_len, 4);
+       }
+
+       if (arg->n_channels) {
+               if (!arg->channels)
+                       return -EINVAL;
+               if (arg->n_channels > ARRAY_SIZE(arg->channels))
+                       return -EINVAL;
+
+               len += sizeof(struct wmi_chan_list);
+               len += sizeof(__le32) * arg->n_channels;
+       }
+
+       if (arg->n_ssids) {
+               if (!arg->ssids)
+                       return -EINVAL;
+               if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID)
+                       return -EINVAL;
+
+               len += sizeof(struct wmi_ssid_list);
+               len += sizeof(struct wmi_ssid) * arg->n_ssids;
+       }
+
+       if (arg->n_bssids) {
+               if (!arg->bssids)
+                       return -EINVAL;
+               if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID)
+                       return -EINVAL;
+
+               len += sizeof(struct wmi_bssid_list);
+               len += sizeof(struct wmi_mac_addr) * arg->n_bssids;
+       }
+
+       return len;
+}
+
+int ath10k_wmi_start_scan(struct ath10k *ar,
+                         const struct wmi_start_scan_arg *arg)
+{
+       struct wmi_start_scan_cmd *cmd;
+       struct sk_buff *skb;
+       struct wmi_ie_data *ie;
+       struct wmi_chan_list *channels;
+       struct wmi_ssid_list *ssids;
+       struct wmi_bssid_list *bssids;
+       u32 scan_id;
+       u32 scan_req_id;
+       int off;
+       int len = 0;
+       int i;
+
+       len = ath10k_wmi_start_scan_calc_len(arg);
+       if (len < 0)
+               return len; /* len contains error code here */
+
+       skb = ath10k_wmi_alloc_skb(len);
+       if (!skb)
+               return -ENOMEM;
+
+       scan_id  = WMI_HOST_SCAN_REQ_ID_PREFIX;
+       scan_id |= arg->scan_id;
+
+       scan_req_id  = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
+       scan_req_id |= arg->scan_req_id;
+
+       cmd = (struct wmi_start_scan_cmd *)skb->data;
+       cmd->scan_id            = __cpu_to_le32(scan_id);
+       cmd->scan_req_id        = __cpu_to_le32(scan_req_id);
+       cmd->vdev_id            = __cpu_to_le32(arg->vdev_id);
+       cmd->scan_priority      = __cpu_to_le32(arg->scan_priority);
+       cmd->notify_scan_events = __cpu_to_le32(arg->notify_scan_events);
+       cmd->dwell_time_active  = __cpu_to_le32(arg->dwell_time_active);
+       cmd->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive);
+       cmd->min_rest_time      = __cpu_to_le32(arg->min_rest_time);
+       cmd->max_rest_time      = __cpu_to_le32(arg->max_rest_time);
+       cmd->repeat_probe_time  = __cpu_to_le32(arg->repeat_probe_time);
+       cmd->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time);
+       cmd->idle_time          = __cpu_to_le32(arg->idle_time);
+       cmd->max_scan_time      = __cpu_to_le32(arg->max_scan_time);
+       cmd->probe_delay        = __cpu_to_le32(arg->probe_delay);
+       cmd->scan_ctrl_flags    = __cpu_to_le32(arg->scan_ctrl_flags);
+
+       /* TLV list starts after fields included in the struct */
+       off = sizeof(*cmd);
+
+       if (arg->n_channels) {
+               channels = (void *)skb->data + off;
+               channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG);
+               channels->num_chan = __cpu_to_le32(arg->n_channels);
+
+               for (i = 0; i < arg->n_channels; i++)
+                       channels->channel_list[i] =
+                               __cpu_to_le32(arg->channels[i]);
+
+               off += sizeof(*channels);
+               off += sizeof(__le32) * arg->n_channels;
+       }
+
+       if (arg->n_ssids) {
+               ssids = (void *)skb->data + off;
+               ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG);
+               ssids->num_ssids = __cpu_to_le32(arg->n_ssids);
+
+               for (i = 0; i < arg->n_ssids; i++) {
+                       ssids->ssids[i].ssid_len =
+                               __cpu_to_le32(arg->ssids[i].len);
+                       memcpy(&ssids->ssids[i].ssid,
+                              arg->ssids[i].ssid,
+                              arg->ssids[i].len);
+               }
+
+               off += sizeof(*ssids);
+               off += sizeof(struct wmi_ssid) * arg->n_ssids;
+       }
+
+       if (arg->n_bssids) {
+               bssids = (void *)skb->data + off;
+               bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG);
+               bssids->num_bssid = __cpu_to_le32(arg->n_bssids);
+
+               for (i = 0; i < arg->n_bssids; i++)
+                       memcpy(&bssids->bssid_list[i],
+                              arg->bssids[i].bssid,
+                              ETH_ALEN);
+
+               off += sizeof(*bssids);
+               off += sizeof(struct wmi_mac_addr) * arg->n_bssids;
+       }
+
+       if (arg->ie_len) {
+               ie = (void *)skb->data + off;
+               ie->tag = __cpu_to_le32(WMI_IE_TAG);
+               ie->ie_len = __cpu_to_le32(arg->ie_len);
+               memcpy(ie->ie_data, arg->ie, arg->ie_len);
+
+               off += sizeof(*ie);
+               off += roundup(arg->ie_len, 4);
+       }
+
+       if (off != skb->len) {
+               dev_kfree_skb(skb);
+               return -EINVAL;
+       }
+
+       ath10k_dbg(ATH10K_DBG_WMI, "wmi start scan\n");
+       return ath10k_wmi_cmd_send(ar, skb, WMI_START_SCAN_CMDID);
+}
+
+void ath10k_wmi_start_scan_init(struct ath10k *ar,
+                               struct wmi_start_scan_arg *arg)
+{
+       /* set up commonly used values */
+       arg->scan_req_id = 1;
+       arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
+       arg->dwell_time_active = 50;
+       arg->dwell_time_passive = 150;
+       arg->min_rest_time = 50;
+       arg->max_rest_time = 500;
+       arg->repeat_probe_time = 0;
+       arg->probe_spacing_time = 0;
+       arg->idle_time = 0;
+       arg->max_scan_time = 5000;
+       arg->probe_delay = 5;
+       arg->notify_scan_events = WMI_SCAN_EVENT_STARTED
+               | WMI_SCAN_EVENT_COMPLETED
+               | WMI_SCAN_EVENT_BSS_CHANNEL
+               | WMI_SCAN_EVENT_FOREIGN_CHANNEL
+               | WMI_SCAN_EVENT_DEQUEUED;
+       arg->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES;
+       arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
+       arg->n_bssids = 1;
+       arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF";
+}
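
A minimal caller sketch for the two scan helpers above (hypothetical values, error handling elided); note that ath10k_wmi_start_scan() ORs the host prefixes into scan_id and scan_req_id itself:

	struct wmi_start_scan_arg arg = {};
	int ret;

	ath10k_wmi_start_scan_init(ar, &arg);   /* defaults + broadcast BSSID */
	arg.vdev_id = 0;                        /* hypothetical vdev */
	arg.scan_id = 1;

	ret = ath10k_wmi_start_scan(ar, &arg);
	if (ret)
		ath10k_warn("could not start scan (%d)\n", ret);
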
+
+int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
+{
+       struct wmi_stop_scan_cmd *cmd;
+       struct sk_buff *skb;
+       u32 scan_id;
+       u32 req_id;
+
+       if (arg->req_id > 0xFFF)
+               return -EINVAL;
+       if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
+               return -EINVAL;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       scan_id = arg->u.scan_id;
+       scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
+
+       req_id = arg->req_id;
+       req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
+
+       cmd = (struct wmi_stop_scan_cmd *)skb->data;
+       cmd->req_type    = __cpu_to_le32(arg->req_type);
+       cmd->vdev_id     = __cpu_to_le32(arg->u.vdev_id);
+       cmd->scan_id     = __cpu_to_le32(scan_id);
+       cmd->scan_req_id = __cpu_to_le32(req_id);
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
+                  arg->req_id, arg->req_type, arg->u.scan_id);
+       return ath10k_wmi_cmd_send(ar, skb, WMI_STOP_SCAN_CMDID);
+}
+
+int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
+                          enum wmi_vdev_type type,
+                          enum wmi_vdev_subtype subtype,
+                          const u8 macaddr[ETH_ALEN])
+{
+       struct wmi_vdev_create_cmd *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_vdev_create_cmd *)skb->data;
+       cmd->vdev_id      = __cpu_to_le32(vdev_id);
+       cmd->vdev_type    = __cpu_to_le32(type);
+       cmd->vdev_subtype = __cpu_to_le32(subtype);
+       memcpy(cmd->vdev_macaddr.addr, macaddr, ETH_ALEN);
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
+                  vdev_id, type, subtype, macaddr);
+
+       return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_CREATE_CMDID);
+}
+
+int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
+{
+       struct wmi_vdev_delete_cmd *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_vdev_delete_cmd *)skb->data;
+       cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "WMI vdev delete id %d\n", vdev_id);
+
+       return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DELETE_CMDID);
+}
+
+static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
+                               const struct wmi_vdev_start_request_arg *arg,
+                               enum wmi_cmd_id cmd_id)
+{
+       struct wmi_vdev_start_request_cmd *cmd;
+       struct sk_buff *skb;
+       const char *cmdname;
+       u32 flags = 0;
+
+       if (cmd_id != WMI_VDEV_START_REQUEST_CMDID &&
+           cmd_id != WMI_VDEV_RESTART_REQUEST_CMDID)
+               return -EINVAL;
+       if (WARN_ON(arg->ssid && arg->ssid_len == 0))
+               return -EINVAL;
+       if (WARN_ON(arg->hidden_ssid && !arg->ssid))
+               return -EINVAL;
+       if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
+               return -EINVAL;
+
+       if (cmd_id == WMI_VDEV_START_REQUEST_CMDID)
+               cmdname = "start";
+       else if (cmd_id == WMI_VDEV_RESTART_REQUEST_CMDID)
+               cmdname = "restart";
+       else
+               return -EINVAL; /* should not happen, we already check cmd_id */
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       if (arg->hidden_ssid)
+               flags |= WMI_VDEV_START_HIDDEN_SSID;
+       if (arg->pmf_enabled)
+               flags |= WMI_VDEV_START_PMF_ENABLED;
+
+       cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
+       cmd->vdev_id         = __cpu_to_le32(arg->vdev_id);
+       cmd->disable_hw_ack  = __cpu_to_le32(arg->disable_hw_ack);
+       cmd->beacon_interval = __cpu_to_le32(arg->bcn_intval);
+       cmd->dtim_period     = __cpu_to_le32(arg->dtim_period);
+       cmd->flags           = __cpu_to_le32(flags);
+       cmd->bcn_tx_rate     = __cpu_to_le32(arg->bcn_tx_rate);
+       cmd->bcn_tx_power    = __cpu_to_le32(arg->bcn_tx_power);
+
+       if (arg->ssid) {
+               cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
+               memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
+       }
+
+       cmd->chan.mhz = __cpu_to_le32(arg->channel.freq);
+
+       cmd->chan.band_center_freq1 =
+               __cpu_to_le32(arg->channel.band_center_freq1);
+
+       cmd->chan.mode = arg->channel.mode;
+       cmd->chan.min_power = arg->channel.min_power;
+       cmd->chan.max_power = arg->channel.max_power;
+       cmd->chan.reg_power = arg->channel.max_reg_power;
+       cmd->chan.reg_classid = arg->channel.reg_class_id;
+       cmd->chan.antenna_max = arg->channel.max_antenna_gain;
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "wmi vdev %s id 0x%x freq %d, mode %d, ch_flags: 0x%0X,"
+                  "wmi vdev %s id 0x%x freq %d, mode %d, ch_flags: 0x%0X, "
+                  arg->channel.mode, flags, arg->channel.max_power);
+
+       return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+int ath10k_wmi_vdev_start(struct ath10k *ar,
+                         const struct wmi_vdev_start_request_arg *arg)
+{
+       return ath10k_wmi_vdev_start_restart(ar, arg,
+                                            WMI_VDEV_START_REQUEST_CMDID);
+}
+
+int ath10k_wmi_vdev_restart(struct ath10k *ar,
+                    const struct wmi_vdev_start_request_arg *arg)
+{
+       return ath10k_wmi_vdev_start_restart(ar, arg,
+                                            WMI_VDEV_RESTART_REQUEST_CMDID);
+}
+
+int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
+{
+       struct wmi_vdev_stop_cmd *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_vdev_stop_cmd *)skb->data;
+       cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+       ath10k_dbg(ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
+
+       return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_STOP_CMDID);
+}
+
+int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
+{
+       struct wmi_vdev_up_cmd *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_vdev_up_cmd *)skb->data;
+       cmd->vdev_id       = __cpu_to_le32(vdev_id);
+       cmd->vdev_assoc_id = __cpu_to_le32(aid);
+       memcpy(cmd->vdev_bssid.addr, bssid, ETH_ALEN);
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
+                  vdev_id, aid, bssid);
+
+       return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_UP_CMDID);
+}
+
+int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
+{
+       struct wmi_vdev_down_cmd *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_vdev_down_cmd *)skb->data;
+       cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "wmi mgmt vdev down id 0x%x\n", vdev_id);
+
+       return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DOWN_CMDID);
+}
+
+int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
+                             enum wmi_vdev_param param_id, u32 param_value)
+{
+       struct wmi_vdev_set_param_cmd *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
+       cmd->vdev_id     = __cpu_to_le32(vdev_id);
+       cmd->param_id    = __cpu_to_le32(param_id);
+       cmd->param_value = __cpu_to_le32(param_value);
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "wmi vdev id 0x%x set param %d value %d\n",
+                  vdev_id, param_id, param_value);
+
+       return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_SET_PARAM_CMDID);
+}
+
+int ath10k_wmi_vdev_install_key(struct ath10k *ar,
+                               const struct wmi_vdev_install_key_arg *arg)
+{
+       struct wmi_vdev_install_key_cmd *cmd;
+       struct sk_buff *skb;
+
+       if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
+               return -EINVAL;
+       if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
+               return -EINVAL;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->key_len);
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
+       cmd->vdev_id       = __cpu_to_le32(arg->vdev_id);
+       cmd->key_idx       = __cpu_to_le32(arg->key_idx);
+       cmd->key_flags     = __cpu_to_le32(arg->key_flags);
+       cmd->key_cipher    = __cpu_to_le32(arg->key_cipher);
+       cmd->key_len       = __cpu_to_le32(arg->key_len);
+       cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
+       cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);
+
+       if (arg->macaddr)
+               memcpy(cmd->peer_macaddr.addr, arg->macaddr, ETH_ALEN);
+       if (arg->key_data)
+               memcpy(cmd->key_data, arg->key_data, arg->key_len);
+
+       return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_INSTALL_KEY_CMDID);
+}
+
+int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
+                          const u8 peer_addr[ETH_ALEN])
+{
+       struct wmi_peer_create_cmd *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_peer_create_cmd *)skb->data;
+       cmd->vdev_id = __cpu_to_le32(vdev_id);
+       memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "wmi peer create vdev_id %d peer_addr %pM\n",
+                  vdev_id, peer_addr);
+       return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_CREATE_CMDID);
+}
+
+int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
+                          const u8 peer_addr[ETH_ALEN])
+{
+       struct wmi_peer_delete_cmd *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_peer_delete_cmd *)skb->data;
+       cmd->vdev_id = __cpu_to_le32(vdev_id);
+       memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "wmi peer delete vdev_id %d peer_addr %pM\n",
+                  vdev_id, peer_addr);
+       return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_DELETE_CMDID);
+}
+
+int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
+                         const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
+{
+       struct wmi_peer_flush_tids_cmd *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
+       cmd->vdev_id         = __cpu_to_le32(vdev_id);
+       cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
+       memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
+                  vdev_id, peer_addr, tid_bitmap);
+       return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_FLUSH_TIDS_CMDID);
+}
+
+int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
+                             const u8 *peer_addr, enum wmi_peer_param param_id,
+                             u32 param_value)
+{
+       struct wmi_peer_set_param_cmd *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_peer_set_param_cmd *)skb->data;
+       cmd->vdev_id     = __cpu_to_le32(vdev_id);
+       cmd->param_id    = __cpu_to_le32(param_id);
+       cmd->param_value = __cpu_to_le32(param_value);
+       memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "wmi vdev %d peer 0x%pM set param %d value %d\n",
+                  vdev_id, peer_addr, param_id, param_value);
+
+       return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_SET_PARAM_CMDID);
+}
+
+int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
+                         enum wmi_sta_ps_mode psmode)
+{
+       struct wmi_sta_powersave_mode_cmd *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_sta_powersave_mode_cmd *)skb->data;
+       cmd->vdev_id     = __cpu_to_le32(vdev_id);
+       cmd->sta_ps_mode = __cpu_to_le32(psmode);
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "wmi set powersave id 0x%x mode %d\n",
+                  vdev_id, psmode);
+
+       return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_MODE_CMDID);
+}
+
+int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
+                               enum wmi_sta_powersave_param param_id,
+                               u32 value)
+{
+       struct wmi_sta_powersave_param_cmd *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
+       cmd->vdev_id     = __cpu_to_le32(vdev_id);
+       cmd->param_id    = __cpu_to_le32(param_id);
+       cmd->param_value = __cpu_to_le32(value);
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "wmi sta ps param vdev_id 0x%x param %d value %d\n",
+                  vdev_id, param_id, value);
+       return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
+}
+
+int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+                              enum wmi_ap_ps_peer_param param_id, u32 value)
+{
+       struct wmi_ap_ps_peer_cmd *cmd;
+       struct sk_buff *skb;
+
+       if (!mac)
+               return -EINVAL;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
+       cmd->vdev_id = __cpu_to_le32(vdev_id);
+       cmd->param_id = __cpu_to_le32(param_id);
+       cmd->param_value = __cpu_to_le32(value);
+       memcpy(cmd->peer_macaddr.addr, mac, ETH_ALEN);
+
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
+                  vdev_id, param_id, value, mac);
+
+       return ath10k_wmi_cmd_send(ar, skb, WMI_AP_PS_PEER_PARAM_CMDID);
+}
+
+int ath10k_wmi_scan_chan_list(struct ath10k *ar,
+                             const struct wmi_scan_chan_list_arg *arg)
+{
+       struct wmi_scan_chan_list_cmd *cmd;
+       struct sk_buff *skb;
+       struct wmi_channel_arg *ch;
+       struct wmi_channel *ci;
+       int len;
+       int i;
+
+       len = sizeof(*cmd) + arg->n_channels * sizeof(struct wmi_channel);
+
+       skb = ath10k_wmi_alloc_skb(len);
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
+       cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
+
+       for (i = 0; i < arg->n_channels; i++) {
+               u32 flags = 0;
+
+               ch = &arg->channels[i];
+               ci = &cmd->chan_info[i];
+
+               if (ch->passive)
+                       flags |= WMI_CHAN_FLAG_PASSIVE;
+               if (ch->allow_ibss)
+                       flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
+               if (ch->allow_ht)
+                       flags |= WMI_CHAN_FLAG_ALLOW_HT;
+               if (ch->allow_vht)
+                       flags |= WMI_CHAN_FLAG_ALLOW_VHT;
+               if (ch->ht40plus)
+                       flags |= WMI_CHAN_FLAG_HT40_PLUS;
+
+               ci->mhz               = __cpu_to_le32(ch->freq);
+               ci->band_center_freq1 = __cpu_to_le32(ch->freq);
+               ci->band_center_freq2 = 0;
+               ci->min_power         = ch->min_power;
+               ci->max_power         = ch->max_power;
+               ci->reg_power         = ch->max_reg_power;
+               ci->antenna_max       = ch->max_antenna_gain;
+
+               /* mode & flags share storage */
+               ci->mode              = ch->mode;
+               ci->flags            |= __cpu_to_le32(flags);
+       }
+
+       return ath10k_wmi_cmd_send(ar, skb, WMI_SCAN_CHAN_LIST_CMDID);
+}
+
+int ath10k_wmi_peer_assoc(struct ath10k *ar,
+                         const struct wmi_peer_assoc_complete_arg *arg)
+{
+       struct wmi_peer_assoc_complete_cmd *cmd;
+       struct sk_buff *skb;
+
+       if (arg->peer_mpdu_density > 16)
+               return -EINVAL;
+       if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
+               return -EINVAL;
+       if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
+               return -EINVAL;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_peer_assoc_complete_cmd *)skb->data;
+       cmd->vdev_id            = __cpu_to_le32(arg->vdev_id);
+       cmd->peer_new_assoc     = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
+       cmd->peer_associd       = __cpu_to_le32(arg->peer_aid);
+       cmd->peer_flags         = __cpu_to_le32(arg->peer_flags);
+       cmd->peer_caps          = __cpu_to_le32(arg->peer_caps);
+       cmd->peer_listen_intval = __cpu_to_le32(arg->peer_listen_intval);
+       cmd->peer_ht_caps       = __cpu_to_le32(arg->peer_ht_caps);
+       cmd->peer_max_mpdu      = __cpu_to_le32(arg->peer_max_mpdu);
+       cmd->peer_mpdu_density  = __cpu_to_le32(arg->peer_mpdu_density);
+       cmd->peer_rate_caps     = __cpu_to_le32(arg->peer_rate_caps);
+       cmd->peer_nss           = __cpu_to_le32(arg->peer_num_spatial_streams);
+       cmd->peer_vht_caps      = __cpu_to_le32(arg->peer_vht_caps);
+       cmd->peer_phymode       = __cpu_to_le32(arg->peer_phymode);
+
+       memcpy(cmd->peer_macaddr.addr, arg->addr, ETH_ALEN);
+
+       cmd->peer_legacy_rates.num_rates =
+               __cpu_to_le32(arg->peer_legacy_rates.num_rates);
+       memcpy(cmd->peer_legacy_rates.rates, arg->peer_legacy_rates.rates,
+              arg->peer_legacy_rates.num_rates);
+
+       cmd->peer_ht_rates.num_rates =
+               __cpu_to_le32(arg->peer_ht_rates.num_rates);
+       memcpy(cmd->peer_ht_rates.rates, arg->peer_ht_rates.rates,
+              arg->peer_ht_rates.num_rates);
+
+       cmd->peer_vht_rates.rx_max_rate =
+               __cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
+       cmd->peer_vht_rates.rx_mcs_set =
+               __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
+       cmd->peer_vht_rates.tx_max_rate =
+               __cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
+       cmd->peer_vht_rates.tx_mcs_set =
+               __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
+
+       return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_ASSOC_CMDID);
+}
+
+int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg)
+{
+       struct wmi_bcn_tx_cmd *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->bcn_len);
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_bcn_tx_cmd *)skb->data;
+       cmd->hdr.vdev_id  = __cpu_to_le32(arg->vdev_id);
+       cmd->hdr.tx_rate  = __cpu_to_le32(arg->tx_rate);
+       cmd->hdr.tx_power = __cpu_to_le32(arg->tx_power);
+       cmd->hdr.bcn_len  = __cpu_to_le32(arg->bcn_len);
+       memcpy(cmd->bcn, arg->bcn, arg->bcn_len);
+
+       return ath10k_wmi_cmd_send(ar, skb, WMI_BCN_TX_CMDID);
+}
+
+static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params,
+                                         const struct wmi_wmm_params_arg *arg)
+{
+       params->cwmin  = __cpu_to_le32(arg->cwmin);
+       params->cwmax  = __cpu_to_le32(arg->cwmax);
+       params->aifs   = __cpu_to_le32(arg->aifs);
+       params->txop   = __cpu_to_le32(arg->txop);
+       params->acm    = __cpu_to_le32(arg->acm);
+       params->no_ack = __cpu_to_le32(arg->no_ack);
+}
+
+int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
+                       const struct wmi_pdev_set_wmm_params_arg *arg)
+{
+       struct wmi_pdev_set_wmm_params *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_pdev_set_wmm_params *)skb->data;
+       ath10k_wmi_pdev_set_wmm_param(&cmd->ac_be, &arg->ac_be);
+       ath10k_wmi_pdev_set_wmm_param(&cmd->ac_bk, &arg->ac_bk);
+       ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
+       ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
+
+       ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
+       return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_WMM_PARAMS_CMDID);
+}
+
+int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
+{
+       struct wmi_request_stats_cmd *cmd;
+       struct sk_buff *skb;
+
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_request_stats_cmd *)skb->data;
+       cmd->stats_id = __cpu_to_le32(stats_id);
+
+       ath10k_dbg(ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id);
+       return ath10k_wmi_cmd_send(ar, skb, WMI_REQUEST_STATS_CMDID);
+}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
new file mode 100644 (file)
index 0000000..9555f5a
--- /dev/null
@@ -0,0 +1,3052 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _WMI_H_
+#define _WMI_H_
+
+#include <linux/types.h>
+#include <net/mac80211.h>
+
+/*
+ * This file specifies the WMI interface for the Unified Software
+ * Architecture.
+ *
+ * It includes definitions of all the commands and events. Commands are
+ * messages from the host to the target. Events and Replies are messages
+ * from the target to the host.
+ *
+ * Ownership of correctness in regards to WMI commands belongs to the host
+ * driver and the target is not required to validate parameters for value,
+ * proper range, or any other checking.
+ *
+ * Guidelines for extending this interface are below.
+ *
+ * 1. Add new WMI commands ONLY within the specified range - 0x9000 - 0x9fff
+ *
+ * 2. Use ONLY u32 type for defining member variables within WMI
+ *    command/event structures. Do not use u8, u16, bool or
+ *    enum types within these structures.
+ *
+ * 3. DO NOT define bit fields within structures. Implement bit fields
+ *    using masks if necessary. Do not use the programming language's bit
+ *    field definition.
+ *
+ * 4. Define macros for encode/decode of u8, u16 fields within
+ *    the u32 variables. Use these macros for set/get of these fields.
+ *    Try to use this to optimize the structure without bloating it with
+ *    u32 variables for every lower sized field.
+ *
+ * 5. Do not use PACK/UNPACK attributes for the structures as each member
+ *    variable is already 4-byte aligned by virtue of being a u32
+ *    type.
+ *
+ * 6. Comment each parameter part of the WMI command/event structure by
+ *    using two stars at the beginning of a C comment instead of one star to
+ *    enable HTML document generation using Doxygen.
+ *
+ */
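+
+/*
+ * Illustrative sketch of guideline 4 above (not part of the interface; the
+ * WMI_EXAMPLE_* names are hypothetical): a 16-bit field packed into the low
+ * half of a u32 member would be accessed with mask/shift macros rather than
+ * a u16 member or a bit field, e.g.
+ *
+ *   #define WMI_EXAMPLE_RATE_MASK   0x0000ffff
+ *   #define WMI_EXAMPLE_RATE_LSB    0
+ *   #define WMI_EXAMPLE_RATE_GET(x) \
+ *          (((x) & WMI_EXAMPLE_RATE_MASK) >> WMI_EXAMPLE_RATE_LSB)
+ *   #define WMI_EXAMPLE_RATE_SET(x, v) \
+ *          (((x) & ~WMI_EXAMPLE_RATE_MASK) | \
+ *           (((v) << WMI_EXAMPLE_RATE_LSB) & WMI_EXAMPLE_RATE_MASK))
+ */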
+
+/* Control Path */
+struct wmi_cmd_hdr {
+       __le32 cmd_id;
+} __packed;
+
+#define WMI_CMD_HDR_CMD_ID_MASK   0x00FFFFFF
+#define WMI_CMD_HDR_CMD_ID_LSB    0
+#define WMI_CMD_HDR_PLT_PRIV_MASK 0xFF000000
+#define WMI_CMD_HDR_PLT_PRIV_LSB  24
+
+#define HTC_PROTOCOL_VERSION    0x0002
+#define WMI_PROTOCOL_VERSION    0x0002
+
+enum wmi_service_id {
+       WMI_SERVICE_BEACON_OFFLOAD = 0,   /* beacon offload */
+       WMI_SERVICE_SCAN_OFFLOAD,         /* scan offload */
+       WMI_SERVICE_ROAM_OFFLOAD,         /* roam offload */
+       WMI_SERVICE_BCN_MISS_OFFLOAD,     /* beacon miss offload */
+       WMI_SERVICE_STA_PWRSAVE,          /* fake sleep + basic power save */
+       WMI_SERVICE_STA_ADVANCED_PWRSAVE, /* uapsd, pspoll, force sleep */
+       WMI_SERVICE_AP_UAPSD,             /* uapsd on AP */
+       WMI_SERVICE_AP_DFS,               /* DFS on AP */
+       WMI_SERVICE_11AC,                 /* supports 11ac */
+       WMI_SERVICE_BLOCKACK,   /* Supports triggering ADDBA/DELBA from host*/
+       WMI_SERVICE_PHYERR,               /* PHY error */
+       WMI_SERVICE_BCN_FILTER,           /* Beacon filter support */
+       WMI_SERVICE_RTT,                  /* RTT (round trip time) support */
+       WMI_SERVICE_RATECTRL,             /* Rate-control */
+       WMI_SERVICE_WOW,                  /* WOW Support */
+       WMI_SERVICE_RATECTRL_CACHE,       /* Rate-control caching */
+       WMI_SERVICE_IRAM_TIDS,            /* TIDs in IRAM */
+       WMI_SERVICE_ARPNS_OFFLOAD,        /* ARP NS Offload support */
+       WMI_SERVICE_NLO,                  /* Network list offload service */
+       WMI_SERVICE_GTK_OFFLOAD,          /* GTK offload */
+       WMI_SERVICE_SCAN_SCH,             /* Scan Scheduler Service */
+       WMI_SERVICE_CSA_OFFLOAD,          /* CSA offload service */
+       WMI_SERVICE_CHATTER,              /* Chatter service */
+       WMI_SERVICE_COEX_FREQAVOID,       /* FW report freq range to avoid */
+       WMI_SERVICE_PACKET_POWER_SAVE,    /* packet power save service */
+       WMI_SERVICE_FORCE_FW_HANG,        /* To test fw recovery mechanism */
+       WMI_SERVICE_GPIO,                 /* GPIO service */
+       WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM, /* Modulated DTIM support */
+       WMI_STA_UAPSD_BASIC_AUTO_TRIG,    /* UAPSD AC Trigger Generation  */
+       WMI_STA_UAPSD_VAR_AUTO_TRIG,      /* -do- */
+       WMI_SERVICE_STA_KEEP_ALIVE,       /* STA keep alive mechanism support */
+       WMI_SERVICE_TX_ENCAP,             /* Packet type for TX encapsulation */
+
+       WMI_SERVICE_LAST,
+       WMI_MAX_SERVICE = 64              /* max service */
+};
+
+static inline char *wmi_service_name(int service_id)
+{
+       switch (service_id) {
+       case WMI_SERVICE_BEACON_OFFLOAD:
+               return "BEACON_OFFLOAD";
+       case WMI_SERVICE_SCAN_OFFLOAD:
+               return "SCAN_OFFLOAD";
+       case WMI_SERVICE_ROAM_OFFLOAD:
+               return "ROAM_OFFLOAD";
+       case WMI_SERVICE_BCN_MISS_OFFLOAD:
+               return "BCN_MISS_OFFLOAD";
+       case WMI_SERVICE_STA_PWRSAVE:
+               return "STA_PWRSAVE";
+       case WMI_SERVICE_STA_ADVANCED_PWRSAVE:
+               return "STA_ADVANCED_PWRSAVE";
+       case WMI_SERVICE_AP_UAPSD:
+               return "AP_UAPSD";
+       case WMI_SERVICE_AP_DFS:
+               return "AP_DFS";
+       case WMI_SERVICE_11AC:
+               return "11AC";
+       case WMI_SERVICE_BLOCKACK:
+               return "BLOCKACK";
+       case WMI_SERVICE_PHYERR:
+               return "PHYERR";
+       case WMI_SERVICE_BCN_FILTER:
+               return "BCN_FILTER";
+       case WMI_SERVICE_RTT:
+               return "RTT";
+       case WMI_SERVICE_RATECTRL:
+               return "RATECTRL";
+       case WMI_SERVICE_WOW:
+               return "WOW";
+       case WMI_SERVICE_RATECTRL_CACHE:
+               return "RATECTRL CACHE";
+       case WMI_SERVICE_IRAM_TIDS:
+               return "IRAM TIDS";
+       case WMI_SERVICE_ARPNS_OFFLOAD:
+               return "ARPNS_OFFLOAD";
+       case WMI_SERVICE_NLO:
+               return "NLO";
+       case WMI_SERVICE_GTK_OFFLOAD:
+               return "GTK_OFFLOAD";
+       case WMI_SERVICE_SCAN_SCH:
+               return "SCAN_SCH";
+       case WMI_SERVICE_CSA_OFFLOAD:
+               return "CSA_OFFLOAD";
+       case WMI_SERVICE_CHATTER:
+               return "CHATTER";
+       case WMI_SERVICE_COEX_FREQAVOID:
+               return "COEX_FREQAVOID";
+       case WMI_SERVICE_PACKET_POWER_SAVE:
+               return "PACKET_POWER_SAVE";
+       case WMI_SERVICE_FORCE_FW_HANG:
+               return "FORCE FW HANG";
+       case WMI_SERVICE_GPIO:
+               return "GPIO";
+       case WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM:
+               return "MODULATED DTIM";
+       case WMI_STA_UAPSD_BASIC_AUTO_TRIG:
+               return "BASIC UAPSD";
+       case WMI_STA_UAPSD_VAR_AUTO_TRIG:
+               return "VAR UAPSD";
+       case WMI_SERVICE_STA_KEEP_ALIVE:
+               return "STA KEEP ALIVE";
+       case WMI_SERVICE_TX_ENCAP:
+               return "TX ENCAP";
+       default:
+               return "UNKNOWN SERVICE";
+       }
+}
+
+
+#define WMI_SERVICE_BM_SIZE \
+       ((WMI_MAX_SERVICE + sizeof(u32) - 1)/sizeof(u32))
+
+/* 2 word representation of MAC addr */
+struct wmi_mac_addr {
+       union {
+               u8 addr[6];
+               struct {
+                       u32 word0;
+                       u32 word1;
+               } __packed;
+       } __packed;
+} __packed;
+
+/* macro to convert MAC address from WMI word format to char array */
+#define WMI_MAC_ADDR_TO_CHAR_ARRAY(pwmi_mac_addr, c_macaddr) do { \
+       (c_macaddr)[0] =  ((pwmi_mac_addr)->word0) & 0xff; \
+       (c_macaddr)[1] = (((pwmi_mac_addr)->word0) >> 8) & 0xff; \
+       (c_macaddr)[2] = (((pwmi_mac_addr)->word0) >> 16) & 0xff; \
+       (c_macaddr)[3] = (((pwmi_mac_addr)->word0) >> 24) & 0xff; \
+       (c_macaddr)[4] =  ((pwmi_mac_addr)->word1) & 0xff; \
+       (c_macaddr)[5] = (((pwmi_mac_addr)->word1) >> 8) & 0xff; \
+       } while (0)
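+
+/*
+ * Usage sketch (illustrative only): given struct wmi_mac_addr *wmi_addr and
+ * u8 buf[ETH_ALEN], WMI_MAC_ADDR_TO_CHAR_ARRAY(wmi_addr, buf) unpacks the
+ * two 32-bit words back into the six MAC octets, with word0 holding octets
+ * 0-3 and the low half of word1 holding octets 4-5.
+ */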
+
+/*
+ * wmi command groups.
+ */
+enum wmi_cmd_group {
+       /* 0 to 2 are reserved */
+       WMI_GRP_START = 0x3,
+       WMI_GRP_SCAN = WMI_GRP_START,
+       WMI_GRP_PDEV,
+       WMI_GRP_VDEV,
+       WMI_GRP_PEER,
+       WMI_GRP_MGMT,
+       WMI_GRP_BA_NEG,
+       WMI_GRP_STA_PS,
+       WMI_GRP_DFS,
+       WMI_GRP_ROAM,
+       WMI_GRP_OFL_SCAN,
+       WMI_GRP_P2P,
+       WMI_GRP_AP_PS,
+       WMI_GRP_RATE_CTRL,
+       WMI_GRP_PROFILE,
+       WMI_GRP_SUSPEND,
+       WMI_GRP_BCN_FILTER,
+       WMI_GRP_WOW,
+       WMI_GRP_RTT,
+       WMI_GRP_SPECTRAL,
+       WMI_GRP_STATS,
+       WMI_GRP_ARP_NS_OFL,
+       WMI_GRP_NLO_OFL,
+       WMI_GRP_GTK_OFL,
+       WMI_GRP_CSA_OFL,
+       WMI_GRP_CHATTER,
+       WMI_GRP_TID_ADDBA,
+       WMI_GRP_MISC,
+       WMI_GRP_GPIO,
+};
+
+#define WMI_CMD_GRP(grp_id) (((grp_id) << 12) | 0x1)
+#define WMI_EVT_GRP_START_ID(grp_id) (((grp_id) << 12) | 0x1)
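+
+/*
+ * Worked example: WMI_CMD_GRP(WMI_GRP_SCAN) is (0x3 << 12) | 0x1 = 0x3001,
+ * so WMI_START_SCAN_CMDID below starts at 0x3001 and the remaining commands
+ * in that group follow sequentially (0x3002, 0x3003, ...). Event IDs are
+ * numbered the same way via WMI_EVT_GRP_START_ID().
+ */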
+
+/* Command IDs and command events. */
+enum wmi_cmd_id {
+       WMI_INIT_CMDID = 0x1,
+
+       /* Scan specific commands */
+       WMI_START_SCAN_CMDID = WMI_CMD_GRP(WMI_GRP_SCAN),
+       WMI_STOP_SCAN_CMDID,
+       WMI_SCAN_CHAN_LIST_CMDID,
+       WMI_SCAN_SCH_PRIO_TBL_CMDID,
+
+       /* PDEV (physical device) specific commands */
+       WMI_PDEV_SET_REGDOMAIN_CMDID = WMI_CMD_GRP(WMI_GRP_PDEV),
+       WMI_PDEV_SET_CHANNEL_CMDID,
+       WMI_PDEV_SET_PARAM_CMDID,
+       WMI_PDEV_PKTLOG_ENABLE_CMDID,
+       WMI_PDEV_PKTLOG_DISABLE_CMDID,
+       WMI_PDEV_SET_WMM_PARAMS_CMDID,
+       WMI_PDEV_SET_HT_CAP_IE_CMDID,
+       WMI_PDEV_SET_VHT_CAP_IE_CMDID,
+       WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
+       WMI_PDEV_SET_QUIET_MODE_CMDID,
+       WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+       WMI_PDEV_GET_TPC_CONFIG_CMDID,
+       WMI_PDEV_SET_BASE_MACADDR_CMDID,
+
+       /* VDEV (virtual device) specific commands */
+       WMI_VDEV_CREATE_CMDID = WMI_CMD_GRP(WMI_GRP_VDEV),
+       WMI_VDEV_DELETE_CMDID,
+       WMI_VDEV_START_REQUEST_CMDID,
+       WMI_VDEV_RESTART_REQUEST_CMDID,
+       WMI_VDEV_UP_CMDID,
+       WMI_VDEV_STOP_CMDID,
+       WMI_VDEV_DOWN_CMDID,
+       WMI_VDEV_SET_PARAM_CMDID,
+       WMI_VDEV_INSTALL_KEY_CMDID,
+
+       /* peer specific commands */
+       WMI_PEER_CREATE_CMDID = WMI_CMD_GRP(WMI_GRP_PEER),
+       WMI_PEER_DELETE_CMDID,
+       WMI_PEER_FLUSH_TIDS_CMDID,
+       WMI_PEER_SET_PARAM_CMDID,
+       WMI_PEER_ASSOC_CMDID,
+       WMI_PEER_ADD_WDS_ENTRY_CMDID,
+       WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
+       WMI_PEER_MCAST_GROUP_CMDID,
+
+       /* beacon/management specific commands */
+       WMI_BCN_TX_CMDID = WMI_CMD_GRP(WMI_GRP_MGMT),
+       WMI_PDEV_SEND_BCN_CMDID,
+       WMI_BCN_TMPL_CMDID,
+       WMI_BCN_FILTER_RX_CMDID,
+       WMI_PRB_REQ_FILTER_RX_CMDID,
+       WMI_MGMT_TX_CMDID,
+       WMI_PRB_TMPL_CMDID,
+
+       /* commands to control BA negotiation directly from the host. */
+       WMI_ADDBA_CLEAR_RESP_CMDID = WMI_CMD_GRP(WMI_GRP_BA_NEG),
+       WMI_ADDBA_SEND_CMDID,
+       WMI_ADDBA_STATUS_CMDID,
+       WMI_DELBA_SEND_CMDID,
+       WMI_ADDBA_SET_RESP_CMDID,
+       WMI_SEND_SINGLEAMSDU_CMDID,
+
+       /* Station power save specific config */
+       WMI_STA_POWERSAVE_MODE_CMDID = WMI_CMD_GRP(WMI_GRP_STA_PS),
+       WMI_STA_POWERSAVE_PARAM_CMDID,
+       WMI_STA_MIMO_PS_MODE_CMDID,
+
+       /** DFS-specific commands */
+       WMI_PDEV_DFS_ENABLE_CMDID = WMI_CMD_GRP(WMI_GRP_DFS),
+       WMI_PDEV_DFS_DISABLE_CMDID,
+
+       /* Roaming specific  commands */
+       WMI_ROAM_SCAN_MODE = WMI_CMD_GRP(WMI_GRP_ROAM),
+       WMI_ROAM_SCAN_RSSI_THRESHOLD,
+       WMI_ROAM_SCAN_PERIOD,
+       WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+       WMI_ROAM_AP_PROFILE,
+
+       /* offload scan specific commands */
+       WMI_OFL_SCAN_ADD_AP_PROFILE = WMI_CMD_GRP(WMI_GRP_OFL_SCAN),
+       WMI_OFL_SCAN_REMOVE_AP_PROFILE,
+       WMI_OFL_SCAN_PERIOD,
+
+       /* P2P specific commands */
+       WMI_P2P_DEV_SET_DEVICE_INFO = WMI_CMD_GRP(WMI_GRP_P2P),
+       WMI_P2P_DEV_SET_DISCOVERABILITY,
+       WMI_P2P_GO_SET_BEACON_IE,
+       WMI_P2P_GO_SET_PROBE_RESP_IE,
+       WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
+
+       /* AP power save specific config */
+       WMI_AP_PS_PEER_PARAM_CMDID = WMI_CMD_GRP(WMI_GRP_AP_PS),
+       WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
+
+       /* Rate-control specific commands */
+       WMI_PEER_RATE_RETRY_SCHED_CMDID =
+       WMI_CMD_GRP(WMI_GRP_RATE_CTRL),
+
+       /* WLAN Profiling commands. */
+       WMI_WLAN_PROFILE_TRIGGER_CMDID = WMI_CMD_GRP(WMI_GRP_PROFILE),
+       WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+       WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+       WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+       WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+
+       /* Suspend resume command Ids */
+       WMI_PDEV_SUSPEND_CMDID = WMI_CMD_GRP(WMI_GRP_SUSPEND),
+       WMI_PDEV_RESUME_CMDID,
+
+       /* Beacon filter commands */
+       WMI_ADD_BCN_FILTER_CMDID = WMI_CMD_GRP(WMI_GRP_BCN_FILTER),
+       WMI_RMV_BCN_FILTER_CMDID,
+
+       /* WOW Specific WMI commands*/
+       WMI_WOW_ADD_WAKE_PATTERN_CMDID = WMI_CMD_GRP(WMI_GRP_WOW),
+       WMI_WOW_DEL_WAKE_PATTERN_CMDID,
+       WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+       WMI_WOW_ENABLE_CMDID,
+       WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+
+       /* RTT measurement related cmd */
+       WMI_RTT_MEASREQ_CMDID = WMI_CMD_GRP(WMI_GRP_RTT),
+       WMI_RTT_TSF_CMDID,
+
+       /* spectral scan commands */
+       WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID = WMI_CMD_GRP(WMI_GRP_SPECTRAL),
+       WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+
+       /* F/W stats */
+       WMI_REQUEST_STATS_CMDID = WMI_CMD_GRP(WMI_GRP_STATS),
+
+       /* ARP OFFLOAD REQUEST*/
+       WMI_SET_ARP_NS_OFFLOAD_CMDID = WMI_CMD_GRP(WMI_GRP_ARP_NS_OFL),
+
+       /* Network list offload config */
+       WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID = WMI_CMD_GRP(WMI_GRP_NLO_OFL),
+
+       /* GTK offload Specific WMI commands*/
+       WMI_GTK_OFFLOAD_CMDID = WMI_CMD_GRP(WMI_GRP_GTK_OFL),
+
+       /* CSA offload Specific WMI commands*/
+       WMI_CSA_OFFLOAD_ENABLE_CMDID = WMI_CMD_GRP(WMI_GRP_CSA_OFL),
+       WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
+
+       /* Chatter commands*/
+       WMI_CHATTER_SET_MODE_CMDID = WMI_CMD_GRP(WMI_GRP_CHATTER),
+
+       /* addba specific commands */
+       WMI_PEER_TID_ADDBA_CMDID = WMI_CMD_GRP(WMI_GRP_TID_ADDBA),
+       WMI_PEER_TID_DELBA_CMDID,
+
+       /* set station mimo powersave method */
+       WMI_STA_DTIM_PS_METHOD_CMDID,
+       /* Configure the Station UAPSD AC Auto Trigger Parameters */
+       WMI_STA_UAPSD_AUTO_TRIG_CMDID,
+
+       /* STA Keep alive parameter configuration,
+          Requires WMI_SERVICE_STA_KEEP_ALIVE */
+       WMI_STA_KEEPALIVE_CMD,
+
+       /* misc command group */
+       WMI_ECHO_CMDID = WMI_CMD_GRP(WMI_GRP_MISC),
+       WMI_PDEV_UTF_CMDID,
+       WMI_DBGLOG_CFG_CMDID,
+       WMI_PDEV_QVIT_CMDID,
+       WMI_PDEV_FTM_INTG_CMDID,
+       WMI_VDEV_SET_KEEPALIVE_CMDID,
+       WMI_VDEV_GET_KEEPALIVE_CMDID,
+
+       /* GPIO Configuration */
+       WMI_GPIO_CONFIG_CMDID = WMI_CMD_GRP(WMI_GRP_GPIO),
+       WMI_GPIO_OUTPUT_CMDID,
+};
+
+enum wmi_event_id {
+       WMI_SERVICE_READY_EVENTID = 0x1,
+       WMI_READY_EVENTID,
+
+       /* Scan specific events */
+       WMI_SCAN_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_SCAN),
+
+       /* PDEV specific events */
+       WMI_PDEV_TPC_CONFIG_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_PDEV),
+       WMI_CHAN_INFO_EVENTID,
+       WMI_PHYERR_EVENTID,
+
+       /* VDEV specific events */
+       WMI_VDEV_START_RESP_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_VDEV),
+       WMI_VDEV_STOPPED_EVENTID,
+       WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID,
+
+       /* peer specific events */
+       WMI_PEER_STA_KICKOUT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_PEER),
+
+       /* beacon/mgmt specific events */
+       WMI_MGMT_RX_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_MGMT),
+       WMI_HOST_SWBA_EVENTID,
+       WMI_TBTTOFFSET_UPDATE_EVENTID,
+
+       /* ADDBA Related WMI Events*/
+       WMI_TX_DELBA_COMPLETE_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_BA_NEG),
+       WMI_TX_ADDBA_COMPLETE_EVENTID,
+
+       /* Roam event to trigger roaming on host */
+       WMI_ROAM_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_ROAM),
+       WMI_PROFILE_MATCH,
+
+       /* WoW */
+       WMI_WOW_WAKEUP_HOST_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_WOW),
+
+       /* RTT */
+       WMI_RTT_MEASUREMENT_REPORT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_RTT),
+       WMI_TSF_MEASUREMENT_REPORT_EVENTID,
+       WMI_RTT_ERROR_REPORT_EVENTID,
+
+       /* GTK offload */
+       WMI_GTK_OFFLOAD_STATUS_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_GTK_OFL),
+       WMI_GTK_REKEY_FAIL_EVENTID,
+
+       /* CSA IE received event */
+       WMI_CSA_HANDLING_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_CSA_OFL),
+
+       /* Misc events */
+       WMI_ECHO_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_MISC),
+       WMI_PDEV_UTF_EVENTID,
+       WMI_DEBUG_MESG_EVENTID,
+       WMI_UPDATE_STATS_EVENTID,
+       WMI_DEBUG_PRINT_EVENTID,
+       WMI_DCS_INTERFERENCE_EVENTID,
+       WMI_PDEV_QVIT_EVENTID,
+       WMI_WLAN_PROFILE_DATA_EVENTID,
+       WMI_PDEV_FTM_INTG_EVENTID,
+       WMI_WLAN_FREQ_AVOID_EVENTID,
+       WMI_VDEV_GET_KEEPALIVE_EVENTID,
+
+       /* GPIO Event */
+       WMI_GPIO_INPUT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_GPIO),
+};
+
+enum wmi_phy_mode {
+       MODE_11A        = 0,   /* 11a Mode */
+       MODE_11G        = 1,   /* 11b/g Mode */
+       MODE_11B        = 2,   /* 11b Mode */
+       MODE_11GONLY    = 3,   /* 11g only Mode */
+       MODE_11NA_HT20   = 4,  /* 11a HT20 mode */
+       MODE_11NG_HT20   = 5,  /* 11g HT20 mode */
+       MODE_11NA_HT40   = 6,  /* 11a HT40 mode */
+       MODE_11NG_HT40   = 7,  /* 11g HT40 mode */
+       MODE_11AC_VHT20 = 8,
+       MODE_11AC_VHT40 = 9,
+       MODE_11AC_VHT80 = 10,
+       /*    MODE_11AC_VHT160 = 11, */
+       MODE_11AC_VHT20_2G = 11,
+       MODE_11AC_VHT40_2G = 12,
+       MODE_11AC_VHT80_2G = 13,
+       MODE_UNKNOWN    = 14,
+       MODE_MAX        = 14
+};
+
+#define WMI_CHAN_LIST_TAG      0x1
+#define WMI_SSID_LIST_TAG      0x2
+#define WMI_BSSID_LIST_TAG     0x3
+#define WMI_IE_TAG             0x4
+
+struct wmi_channel {
+       __le32 mhz;
+       __le32 band_center_freq1;
+       __le32 band_center_freq2; /* valid for 11ac, 80plus80 */
+       union {
+               __le32 flags; /* WMI_CHAN_FLAG_ */
+               struct {
+                       u8 mode; /* only 6 LSBs */
+               } __packed;
+       } __packed;
+       union {
+               __le32 reginfo0;
+               struct {
+                       u8 min_power;
+                       u8 max_power;
+                       u8 reg_power;
+                       u8 reg_classid;
+               } __packed;
+       } __packed;
+       union {
+               __le32 reginfo1;
+               struct {
+                       u8 antenna_max;
+               } __packed;
+       } __packed;
+} __packed;
+
+struct wmi_channel_arg {
+       u32 freq;
+       u32 band_center_freq1;
+       bool passive;
+       bool allow_ibss;
+       bool allow_ht;
+       bool allow_vht;
+       bool ht40plus;
+       /* note: power unit is 1/4th of dBm */
+       u32 min_power;
+       u32 max_power;
+       u32 max_reg_power;
+       u32 max_antenna_gain;
+       u32 reg_class_id;
+       enum wmi_phy_mode mode;
+};
+
+enum wmi_channel_change_cause {
+       WMI_CHANNEL_CHANGE_CAUSE_NONE = 0,
+       WMI_CHANNEL_CHANGE_CAUSE_CSA,
+};
+
+#define WMI_CHAN_FLAG_HT40_PLUS      (1 << 6)
+#define WMI_CHAN_FLAG_PASSIVE        (1 << 7)
+#define WMI_CHAN_FLAG_ADHOC_ALLOWED  (1 << 8)
+#define WMI_CHAN_FLAG_AP_DISABLED    (1 << 9)
+#define WMI_CHAN_FLAG_DFS            (1 << 10)
+#define WMI_CHAN_FLAG_ALLOW_HT       (1 << 11)
+#define WMI_CHAN_FLAG_ALLOW_VHT      (1 << 12)
+
+/* Indicate reason for channel switch */
+#define WMI_CHANNEL_CHANGE_CAUSE_CSA (1 << 13)
+
+#define WMI_MAX_SPATIAL_STREAM   3
+
+/* HT Capabilities*/
+#define WMI_HT_CAP_ENABLED                0x0001   /* HT Enabled/ disabled */
+#define WMI_HT_CAP_HT20_SGI       0x0002   /* Short Guard Interval with HT20 */
+#define WMI_HT_CAP_DYNAMIC_SMPS           0x0004   /* Dynamic MIMO powersave */
+#define WMI_HT_CAP_TX_STBC                0x0008   /* B3 TX STBC */
+#define WMI_HT_CAP_TX_STBC_MASK_SHIFT     3
+#define WMI_HT_CAP_RX_STBC                0x0030   /* B4-B5 RX STBC */
+#define WMI_HT_CAP_RX_STBC_MASK_SHIFT     4
+#define WMI_HT_CAP_LDPC                   0x0040   /* LDPC supported */
+#define WMI_HT_CAP_L_SIG_TXOP_PROT        0x0080   /* L-SIG TXOP Protection */
+#define WMI_HT_CAP_MPDU_DENSITY           0x0700   /* MPDU Density */
+#define WMI_HT_CAP_MPDU_DENSITY_MASK_SHIFT 8
+#define WMI_HT_CAP_HT40_SGI               0x0800
+
+#define WMI_HT_CAP_DEFAULT_ALL (WMI_HT_CAP_ENABLED       | \
+                               WMI_HT_CAP_HT20_SGI      | \
+                               WMI_HT_CAP_HT40_SGI      | \
+                               WMI_HT_CAP_TX_STBC       | \
+                               WMI_HT_CAP_RX_STBC       | \
+                               WMI_HT_CAP_LDPC)
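+
+/* For reference, WMI_HT_CAP_DEFAULT_ALL evaluates to 0x087b. */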
+
+
+/*
+ * WMI_VHT_CAP_* flags map to the IEEE 802.11ac VHT capability information
+ * field. Fields not defined here are either unsupported or reserved.
+ * Do not change these masks; if you have to add a new one, follow the
+ * bit layout specified by the 802.11ac draft.
+ */
+
+#define WMI_VHT_CAP_MAX_MPDU_LEN_MASK            0x00000003
+#define WMI_VHT_CAP_RX_LDPC                      0x00000010
+#define WMI_VHT_CAP_SGI_80MHZ                    0x00000020
+#define WMI_VHT_CAP_TX_STBC                      0x00000080
+#define WMI_VHT_CAP_RX_STBC_MASK                 0x00000300
+#define WMI_VHT_CAP_RX_STBC_MASK_SHIFT           8
+#define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP            0x03800000
+#define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP_SHIFT      23
+#define WMI_VHT_CAP_RX_FIXED_ANT                 0x10000000
+#define WMI_VHT_CAP_TX_FIXED_ANT                 0x20000000
+
+/* The following also refer for max HT AMSDU */
+#define WMI_VHT_CAP_MAX_MPDU_LEN_3839            0x00000000
+#define WMI_VHT_CAP_MAX_MPDU_LEN_7935            0x00000001
+#define WMI_VHT_CAP_MAX_MPDU_LEN_11454           0x00000002
+
+#define WMI_VHT_CAP_DEFAULT_ALL (WMI_VHT_CAP_MAX_MPDU_LEN_11454  | \
+                                WMI_VHT_CAP_RX_LDPC             | \
+                                WMI_VHT_CAP_SGI_80MHZ           | \
+                                WMI_VHT_CAP_TX_STBC             | \
+                                WMI_VHT_CAP_RX_STBC_MASK        | \
+                                WMI_VHT_CAP_MAX_AMPDU_LEN_EXP   | \
+                                WMI_VHT_CAP_RX_FIXED_ANT        | \
+                                WMI_VHT_CAP_TX_FIXED_ANT)
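+
+/* For reference, WMI_VHT_CAP_DEFAULT_ALL evaluates to 0x338003b2. */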
+
+/*
+ * Interested readers should refer to the Rx/Tx MCS Map definition in
+ * 802.11ac.
+ */
+#define WMI_VHT_MAX_MCS_4_SS_MASK(r, ss)      ((3 & (r)) << (((ss) - 1) << 1))
+#define WMI_VHT_MAX_SUPP_RATE_MASK           0x1fff0000
+#define WMI_VHT_MAX_SUPP_RATE_MASK_SHIFT     16
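+
+/*
+ * Worked example (illustrative): WMI_VHT_MAX_MCS_4_SS_MASK(2, 1) evaluates
+ * to (3 & 2) << 0 = 0x2, i.e. the two-bit MCS map value 2 placed in the
+ * field for spatial stream 1; for spatial stream 2 the same value would be
+ * shifted left by 2 bits, and so on.
+ */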
+
+enum {
+       REGDMN_MODE_11A              = 0x00001, /* 11a channels */
+       REGDMN_MODE_TURBO            = 0x00002, /* 11a turbo-only channels */
+       REGDMN_MODE_11B              = 0x00004, /* 11b channels */
+       REGDMN_MODE_PUREG            = 0x00008, /* 11g channels (OFDM only) */
+       REGDMN_MODE_11G              = 0x00008, /* XXX historical */
+       REGDMN_MODE_108G             = 0x00020, /* 11a+Turbo channels */
+       REGDMN_MODE_108A             = 0x00040, /* 11g+Turbo channels */
+       REGDMN_MODE_XR               = 0x00100, /* XR channels */
+       REGDMN_MODE_11A_HALF_RATE    = 0x00200, /* 11A half rate channels */
+       REGDMN_MODE_11A_QUARTER_RATE = 0x00400, /* 11A quarter rate channels */
+       REGDMN_MODE_11NG_HT20        = 0x00800, /* 11N-G HT20 channels */
+       REGDMN_MODE_11NA_HT20        = 0x01000, /* 11N-A HT20 channels */
+       REGDMN_MODE_11NG_HT40PLUS    = 0x02000, /* 11N-G HT40 + channels */
+       REGDMN_MODE_11NG_HT40MINUS   = 0x04000, /* 11N-G HT40 - channels */
+       REGDMN_MODE_11NA_HT40PLUS    = 0x08000, /* 11N-A HT40 + channels */
+       REGDMN_MODE_11NA_HT40MINUS   = 0x10000, /* 11N-A HT40 - channels */
+       REGDMN_MODE_11AC_VHT20       = 0x20000, /* 5Ghz, VHT20 */
+       REGDMN_MODE_11AC_VHT40PLUS   = 0x40000, /* 5Ghz, VHT40 + channels */
+       REGDMN_MODE_11AC_VHT40MINUS  = 0x80000, /* 5Ghz  VHT40 - channels */
+       REGDMN_MODE_11AC_VHT80       = 0x100000, /* 5Ghz, VHT80 channels */
+       REGDMN_MODE_ALL              = 0xffffffff
+};
+
+#define REGDMN_CAP1_CHAN_HALF_RATE        0x00000001
+#define REGDMN_CAP1_CHAN_QUARTER_RATE     0x00000002
+#define REGDMN_CAP1_CHAN_HAL49GHZ         0x00000004
+
+/* regulatory capabilities */
+#define REGDMN_EEPROM_EEREGCAP_EN_FCC_MIDBAND   0x0040
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_U1_EVEN    0x0080
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_U2         0x0100
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_MIDBAND    0x0200
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_U1_ODD     0x0400
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_NEW_11A    0x0800
+
+struct hal_reg_capabilities {
+       /* regdomain value specified in EEPROM */
+       __le32 eeprom_rd;
+       /* regdomain */
+       __le32 eeprom_rd_ext;
+       /* CAP1 capabilities bit map. */
+       __le32 regcap1;
+       /* REGDMN EEPROM CAP. */
+       __le32 regcap2;
+       /* REGDMN MODE */
+       __le32 wireless_modes;
+       __le32 low_2ghz_chan;
+       __le32 high_2ghz_chan;
+       __le32 low_5ghz_chan;
+       __le32 high_5ghz_chan;
+} __packed;
+
+enum wlan_mode_capability {
+       WHAL_WLAN_11A_CAPABILITY   = 0x1,
+       WHAL_WLAN_11G_CAPABILITY   = 0x2,
+       WHAL_WLAN_11AG_CAPABILITY  = 0x3,
+};
+
+/* structure used by FW for requesting host memory */
+struct wlan_host_mem_req {
+       /* ID of the request */
+       __le32 req_id;
+       /* size of each unit */
+       __le32 unit_size;
+       /* flags to indicate that
+        * the number of units is dependent
+        * on the number of resources (num vdevs, num peers, etc.)
+        */
+       __le32 num_unit_info;
+       /*
+        * actual number of units to allocate. If the flags in num_unit_info
+        * indicate that the number of units is tied to the number of a
+        * particular resource to allocate, then the num_units field is set
+        * to 0 and the host will derive the number of units from the number
+        * of the resources it is requesting.
+        */
+       __le32 num_units;
+} __packed;
+
+#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id) \
+       ((((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \
+       (1 << ((svc_id)%(sizeof(u32))))) != 0)
+
+/*
+ * The following struct holds the optional payload for
+ * wmi_service_ready_event, e.g., 11ac-capable targets pass some of the
+ * device capabilities to the host.
+ */
+struct wmi_service_ready_event {
+       __le32 sw_version;
+       __le32 sw_version_1;
+       __le32 abi_version;
+       /* WMI_PHY_CAPABILITY */
+       __le32 phy_capability;
+       /* Maximum number of frag table entries that SW will populate less 1 */
+       __le32 max_frag_entry;
+       __le32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE];
+       __le32 num_rf_chains;
+       /*
+        * The following field is only valid for service type
+        * WMI_SERVICE_11AC
+        */
+       __le32 ht_cap_info; /* WMI HT Capability */
+       __le32 vht_cap_info; /* VHT capability info field of 802.11ac */
+       __le32 vht_supp_mcs; /* VHT Supported MCS Set field Rx/Tx same */
+       __le32 hw_min_tx_power;
+       __le32 hw_max_tx_power;
+       struct hal_reg_capabilities hal_reg_capabilities;
+       __le32 sys_cap_info;
+       __le32 min_pkt_size_enable; /* Enterprise mode short pkt enable */
+       /*
+        * Max beacon and Probe Response IE offload size
+        * (includes optional P2P IEs)
+        */
+       __le32 max_bcn_ie_size;
+       /*
+        * request to the host to allocate a chunk of memory and pass it down
+        * to FW via WMI_INIT. FW uses this as FW extension memory for saving
+        * its data structures. Only valid for low latency interfaces like
+        * PCIE where FW can access this memory directly or by DMA.
+        */
+       __le32 num_mem_reqs;
+       struct wlan_host_mem_req mem_reqs[1];
+} __packed;
+
+/*
+ * status consists of the upper 16 bits of init status and the lower 16
+ * bits of the module ID that returned the status
+ */
+#define WLAN_INIT_STATUS_SUCCESS   0x0
+#define WLAN_GET_INIT_STATUS_REASON(status)    ((status) & 0xffff)
+#define WLAN_GET_INIT_STATUS_MODULE_ID(status) (((status) >> 16) & 0xffff)
+
+#define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ)
+#define WMI_UNIFIED_READY_TIMEOUT_HZ (5*HZ)
+
+struct wmi_ready_event {
+       __le32 sw_version;
+       __le32 abi_version;
+       struct wmi_mac_addr mac_addr;
+       __le32 status;
+} __packed;
+
+struct wmi_resource_config {
+       /* number of virtual devices (VAPs) to support */
+       __le32 num_vdevs;
+
+       /* number of peer nodes to support */
+       __le32 num_peers;
+
+       /*
+        * In offload mode target supports features like WOW, chatter and
+        * other protocol offloads. In order to support them some
+        * functionalities like reorder buffering, PN checking need to be
+        * done in the target. This determines the maximum number of peers
+        * supported by the target in offload mode.
+        */
+       __le32 num_offload_peers;
+
+       /* For target-based RX reordering */
+       __le32 num_offload_reorder_bufs;
+
+       /* number of keys per peer */
+       __le32 num_peer_keys;
+
+       /* total number of TX/RX data TIDs */
+       __le32 num_tids;
+
+       /*
+        * max skid for resolving hash collisions
+        *
+        *   The address search table is sparse, so that if two MAC addresses
+        *   result in the same hash value, the second of these conflicting
+        *   entries can slide to the next index in the address search table,
+        *   and use it, if it is unoccupied.  This ast_skid_limit parameter
+        *   specifies the upper bound on how many subsequent indices to search
+        *   over to find an unoccupied space.
+        */
+       __le32 ast_skid_limit;
+
+       /*
+        * the nominal chain mask for transmit
+        *
+        *   The chain mask may be modified dynamically, e.g. to operate AP
+        *   tx with a reduced number of chains if no clients are associated.
+        *   This configuration parameter specifies the nominal chain-mask that
+        *   should be used when not operating with a reduced set of tx chains.
+        */
+       __le32 tx_chain_mask;
+
+       /*
+        * the nominal chain mask for receive
+        *
+        *   The chain mask may be modified dynamically, e.g. for a client
+        *   to use a reduced number of chains for receive if the traffic to
+        *   the client is low enough that it doesn't require downlink MIMO
+        *   or antenna diversity.
+        *   This configuration parameter specifies the nominal chain-mask that
+        *   should be used when not operating with a reduced set of rx chains.
+        */
+       __le32 rx_chain_mask;
+
+       /*
+        * what rx reorder timeout (ms) to use for the AC
+        *
+        *   Each WMM access class (voice, video, best-effort, background) will
+        *   have its own timeout value to dictate how long to wait for missing
+        *   rx MPDUs to arrive before flushing subsequent MPDUs that have
+        *   already been received.
+        *   This parameter specifies the timeout in milliseconds for each
+        *   class.
+        */
+       __le32 rx_timeout_pri_vi;
+       __le32 rx_timeout_pri_vo;
+       __le32 rx_timeout_pri_be;
+       __le32 rx_timeout_pri_bk;
+
+       /*
+        * what mode the rx should decap packets to
+        *
+        *   MAC can decap to RAW (no decap), native wifi or Ethernet types
+        *   This setting also determines the default TX behavior; however, TX
+        *   behavior can be modified on a per VAP basis during VAP init
+        */
+       __le32 rx_decap_mode;
+
+       /* the maximum number of scan requests that can be queued */
+       __le32 scan_max_pending_reqs;
+
+       /* maximum VDEV that could use BMISS offload */
+       __le32 bmiss_offload_max_vdev;
+
+       /* maximum VDEV that could use offload roaming */
+       __le32 roam_offload_max_vdev;
+
+       /* maximum AP profiles that would push to offload roaming */
+       __le32 roam_offload_max_ap_profiles;
+
+       /*
+        * how many groups to use for mcast->ucast conversion
+        *
+        *   The target's WAL maintains a table to hold information regarding
+        *   which peers belong to a given multicast group, so that if
+        *   multicast->unicast conversion is enabled, the target can convert
+        *   multicast tx frames to a series of unicast tx frames, to each
+        *   peer within the multicast group.
+        *   This num_mcast_groups configuration parameter tells the target how
+        *   many multicast groups to provide storage for within its multicast
+        *   group membership table.
+        */
+       __le32 num_mcast_groups;
+
+       /*
+        * size to alloc for the mcast membership table
+        *
+        *   This num_mcast_table_elems configuration parameter tells the
+        *   target how many peer elements it needs to provide storage for in
+        *   its multicast group membership table.
+        *   These multicast group membership table elements are shared by the
+        *   multicast groups stored within the table.
+        */
+       __le32 num_mcast_table_elems;
+
+       /*
+        * whether/how to do multicast->unicast conversion
+        *
+        *   This configuration parameter specifies whether the target should
+        *   perform multicast --> unicast conversion on transmit, and if so,
+        *   what to do if it finds no entries in its multicast group
+        *   membership table for the multicast IP address in the tx frame.
+        *   Configuration value:
+        *   0 -> Do not perform multicast to unicast conversion.
+        *   1 -> Convert multicast frames to unicast, if the IP multicast
+        *        address from the tx frame is found in the multicast group
+        *        membership table.  If the IP multicast address is not found,
+        *        drop the frame.
+        *   2 -> Convert multicast frames to unicast, if the IP multicast
+        *        address from the tx frame is found in the multicast group
+        *        membership table.  If the IP multicast address is not found,
+        *        transmit the frame as multicast.
+        */
+       __le32 mcast2ucast_mode;
+
+       /*
+        * how much memory to allocate for a tx PPDU dbg log
+        *
+        *   This parameter controls how much memory the target will allocate
+        *   to store a log of tx PPDU meta-information (how large the PPDU
+        *   was, when it was sent, whether it was successful, etc.)
+        */
+       __le32 tx_dbg_log_size;
+
+       /* how many AST entries to be allocated for WDS */
+       __le32 num_wds_entries;
+
+       /*
+        * MAC DMA burst size, e.g., for a PCI target the limit can be
+        * 0 (default) or 1 (256B)
+        */
+       __le32 dma_burst_size;
+
+       /*
+        * Fixed delimiters to be inserted after every MPDU to
+        * account for interface latency to avoid underrun.
+        */
+       __le32 mac_aggr_delim;
+
+       /*
+        *   determine whether target is responsible for detecting duplicate
+        *   non-aggregate MPDU and timing out stale fragments.
+        *
+        *   A-MPDU reordering is always performed on the target.
+        *
+        *   0: target responsible for frag timeout and dup checking
+        *   1: host responsible for frag timeout and dup checking
+        */
+       __le32 rx_skip_defrag_timeout_dup_detection_check;
+
+       /*
+        * Configuration for VoW:
+        * number of video nodes to be supported
+        * and max number of descriptors for each video link (node).
+        */
+       __le32 vow_config;
+
+       /* maximum VDEV that could use GTK offload */
+       __le32 gtk_offload_max_vdev;
+
+       /* Number of msdu descriptors target should use */
+       __le32 num_msdu_desc;
+
+       /*
+        * Max. number of Tx fragments per MSDU
+        *  This parameter controls the max number of Tx fragments per MSDU.
+        *  This is sent by the target as part of the WMI_SERVICE_READY event
+        *  and is overridden by the OS shim as required.
+        */
+       __le32 max_frag_entries;
+} __packed;
+
+/* structure describing a host memory chunk. */
+struct host_memory_chunk {
+       /* id of the request that is passed up in service ready */
+       __le32 req_id;
+       /* the physical address of the memory chunk */
+       __le32 ptr;
+       /* size of the chunk */
+       __le32 size;
+} __packed;
+
+struct wmi_init_cmd {
+       struct wmi_resource_config resource_config;
+       __le32 num_host_mem_chunks;
+
+       /*
+        * variable number of host memory chunks.
+        * This should be the last element in the structure
+        */
+       struct host_memory_chunk host_mem_chunks[1];
+} __packed;
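+
+/*
+ * Sizing sketch (an assumption about how the sending code lays this out,
+ * not a definition from this header): since host_mem_chunks[] is declared
+ * with one element, a command carrying n chunks would typically be
+ * allocated as
+ *
+ *   len = sizeof(struct wmi_init_cmd) +
+ *         (n - 1) * sizeof(struct host_memory_chunk);
+ *
+ * with num_host_mem_chunks set to __cpu_to_le32(n).
+ */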
+
+/* TLV for channel list */
+struct wmi_chan_list {
+       __le32 tag; /* WMI_CHAN_LIST_TAG */
+       __le32 num_chan;
+       __le32 channel_list[0];
+} __packed;
+
+struct wmi_bssid_list {
+       __le32 tag; /* WMI_BSSID_LIST_TAG */
+       __le32 num_bssid;
+       struct wmi_mac_addr bssid_list[0];
+} __packed;
+
+struct wmi_ie_data {
+       __le32 tag; /* WMI_IE_TAG */
+       __le32 ie_len;
+       u8 ie_data[0];
+} __packed;
+
+struct wmi_ssid {
+       __le32 ssid_len;
+       u8 ssid[32];
+} __packed;
+
+struct wmi_ssid_list {
+       __le32 tag; /* WMI_SSID_LIST_TAG */
+       __le32 num_ssids;
+       struct wmi_ssid ssids[0];
+} __packed;
+
+/* prefix used by scan requestor ids on the host */
+#define WMI_HOST_SCAN_REQUESTOR_ID_PREFIX 0xA000
+
+/* prefix used by scan request ids generated on the host */
+/* host cycles through the lower 12 bits to generate ids */
+#define WMI_HOST_SCAN_REQ_ID_PREFIX 0xA000
+
+#define WLAN_SCAN_PARAMS_MAX_SSID    16
+#define WLAN_SCAN_PARAMS_MAX_BSSID   4
+#define WLAN_SCAN_PARAMS_MAX_IE_LEN  256
+
+/* Scan priority numbers must be sequential, starting with 0 */
+enum wmi_scan_priority {
+       WMI_SCAN_PRIORITY_VERY_LOW = 0,
+       WMI_SCAN_PRIORITY_LOW,
+       WMI_SCAN_PRIORITY_MEDIUM,
+       WMI_SCAN_PRIORITY_HIGH,
+       WMI_SCAN_PRIORITY_VERY_HIGH,
+       WMI_SCAN_PRIORITY_COUNT   /* number of priorities supported */
+};
+
+struct wmi_start_scan_cmd {
+       /* Scan ID */
+       __le32 scan_id;
+       /* Scan requestor ID */
+       __le32 scan_req_id;
+       /* VDEV id(interface) that is requesting scan */
+       __le32 vdev_id;
+       /* Scan Priority, input to scan scheduler */
+       __le32 scan_priority;
+       /* Scan events subscription */
+       __le32 notify_scan_events;
+       /* dwell time in msec on active channels */
+       __le32 dwell_time_active;
+       /* dwell time in msec on passive channels */
+       __le32 dwell_time_passive;
+       /*
+        * min time in msec on the BSS channel, only valid if at least one
+        * VDEV is active
+        */
+       __le32 min_rest_time;
+       /*
+        * max rest time in msec on the BSS channel,only valid if at least
+        * one VDEV is active
+        */
+       /*
+        * The scanner will rest on the BSS channel for at least min_rest_time.
+        * After min_rest_time the scanner will start checking for tx/rx
+        * activity on all VDEVs. If there is no activity the scanner will
+        * switch to off channel. If there is activity the scanner will keep
+        * the radio on the BSS channel until max_rest_time expires. At
+        * max_rest_time the scanner will switch to off channel irrespective of
+        * activity. Activity is determined by the idle_time parameter.
+        */
+       __le32 max_rest_time;
+       /*
+        * time before sending the next set of probe requests.
+        * The scanner keeps repeating probe request transmission with
+        * the period specified by repeat_probe_time.
+        * The number of probe requests sent depends on the ssid_list
+        * and bssid_list
+        */
+       __le32 repeat_probe_time;
+       /* time in msec between 2 consecutive probe requests within a set. */
+       __le32 probe_spacing_time;
+       /*
+        * data inactivity time in msec on bss channel that will be used by
+        * scanner for measuring the inactivity.
+        */
+       __le32 idle_time;
+       /* maximum time in msec allowed for scan  */
+       __le32 max_scan_time;
+       /*
+        * delay in msec before sending first probe request after switching
+        * to a channel
+        */
+       __le32 probe_delay;
+       /* Scan control flags */
+       __le32 scan_ctrl_flags;
+
+       /* Burst duration time in msecs */
+       __le32 burst_duration;
+       /*
+        * TLV (tag length value) parameters follow the scan_cmd structure.
+        * The TLVs can contain a channel list, bssid list, ssid list and
+        * IEs. The TLV tags are defined above.
+        */
+} __packed;
+
+struct wmi_ssid_arg {
+       int len;
+       const u8 *ssid;
+};
+
+struct wmi_bssid_arg {
+       const u8 *bssid;
+};
+
+struct wmi_start_scan_arg {
+       u32 scan_id;
+       u32 scan_req_id;
+       u32 vdev_id;
+       u32 scan_priority;
+       u32 notify_scan_events;
+       u32 dwell_time_active;
+       u32 dwell_time_passive;
+       u32 min_rest_time;
+       u32 max_rest_time;
+       u32 repeat_probe_time;
+       u32 probe_spacing_time;
+       u32 idle_time;
+       u32 max_scan_time;
+       u32 probe_delay;
+       u32 scan_ctrl_flags;
+
+       u32 ie_len;
+       u32 n_channels;
+       u32 n_ssids;
+       u32 n_bssids;
+
+       u8 ie[WLAN_SCAN_PARAMS_MAX_IE_LEN];
+       u32 channels[64];
+       struct wmi_ssid_arg ssids[WLAN_SCAN_PARAMS_MAX_SSID];
+       struct wmi_bssid_arg bssids[WLAN_SCAN_PARAMS_MAX_BSSID];
+};
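+
+/*
+ * Illustrative usage (values are placeholders, not defaults from this
+ * commit): a minimal single-channel, single-SSID scan request could be
+ * described with wmi_start_scan_arg roughly as:
+ *
+ *     struct wmi_start_scan_arg arg = {};
+ *
+ *     arg.vdev_id = vdev_id;
+ *     arg.scan_id = WMI_HOST_SCAN_REQ_ID_PREFIX | 1;
+ *     arg.scan_req_id = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX | 1;
+ *     arg.dwell_time_active = 50;
+ *     arg.dwell_time_passive = 150;
+ *     arg.n_channels = 1;
+ *     arg.channels[0] = 2412;
+ *     arg.n_ssids = 1;
+ *     arg.ssids[0].len = ssid_len;
+ *     arg.ssids[0].ssid = ssid;
+ */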
+
+/* scan control flags */
+
+/* passively scan all channels including active channels */
+#define WMI_SCAN_FLAG_PASSIVE        0x1
+/* add a wildcard ssid probe request even though ssid_list is specified. */
+#define WMI_SCAN_ADD_BCAST_PROBE_REQ 0x2
+/* add cck rates to rates/xrate ie for the generated probe request */
+#define WMI_SCAN_ADD_CCK_RATES 0x4
+/* add ofdm rates to rates/xrate ie for the generated probe request */
+#define WMI_SCAN_ADD_OFDM_RATES 0x8
+/* To enable indication of Chan load and Noise floor to host */
+#define WMI_SCAN_CHAN_STAT_EVENT 0x10
+/* Filter Probe request frames  */
+#define WMI_SCAN_FILTER_PROBE_REQ 0x20
+/* When set, DFS channels will not be scanned */
+#define WMI_SCAN_BYPASS_DFS_CHN 0x40
+/* The FW scan engine may choose to bail out on errors.
+ * Allow the driver to have influence over that. */
+#define WMI_SCAN_CONTINUE_ON_ERROR 0x80
+
+/* WMI_SCAN_CLASS_MASK must be the same value as IEEE80211_SCAN_CLASS_MASK */
+#define WMI_SCAN_CLASS_MASK 0xFF000000
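+
+/*
+ * Example (illustrative only): a typical active scan might OR together
+ * several of the control flags above, e.g.
+ *
+ *     scan_ctrl_flags = WMI_SCAN_ADD_BCAST_PROBE_REQ |
+ *                       WMI_SCAN_ADD_CCK_RATES |
+ *                       WMI_SCAN_ADD_OFDM_RATES |
+ *                       WMI_SCAN_CHAN_STAT_EVENT;
+ */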
+
+
+enum wmi_stop_scan_type {
+       WMI_SCAN_STOP_ONE       = 0x00000000, /* stop by scan_id */
+       WMI_SCAN_STOP_VDEV_ALL  = 0x01000000, /* stop by vdev_id */
+       WMI_SCAN_STOP_ALL       = 0x04000000, /* stop all scans */
+};
+
+struct wmi_stop_scan_cmd {
+       __le32 scan_req_id;
+       __le32 scan_id;
+       __le32 req_type;
+       __le32 vdev_id;
+} __packed;
+
+struct wmi_stop_scan_arg {
+       u32 req_id;
+       enum wmi_stop_scan_type req_type;
+       union {
+               u32 scan_id;
+               u32 vdev_id;
+       } u;
+};
+
+struct wmi_scan_chan_list_cmd {
+       __le32 num_scan_chans;
+       struct wmi_channel chan_info[0];
+} __packed;
+
+struct wmi_scan_chan_list_arg {
+       u32 n_channels;
+       struct wmi_channel_arg *channels;
+};
+
+enum wmi_bss_filter {
+       WMI_BSS_FILTER_NONE = 0,        /* no beacons forwarded */
+       WMI_BSS_FILTER_ALL,             /* all beacons forwarded */
+       WMI_BSS_FILTER_PROFILE,         /* only beacons matching profile */
+       WMI_BSS_FILTER_ALL_BUT_PROFILE, /* all but beacons matching profile */
+       WMI_BSS_FILTER_CURRENT_BSS,     /* only beacons matching current BSS */
+       WMI_BSS_FILTER_ALL_BUT_BSS,     /* all but beacons matching BSS */
+       WMI_BSS_FILTER_PROBED_SSID,     /* beacons matching probed ssid */
+       WMI_BSS_FILTER_LAST_BSS,        /* marker only */
+};
+
+enum wmi_scan_event_type {
+       WMI_SCAN_EVENT_STARTED         = 0x1,
+       WMI_SCAN_EVENT_COMPLETED       = 0x2,
+       WMI_SCAN_EVENT_BSS_CHANNEL     = 0x4,
+       WMI_SCAN_EVENT_FOREIGN_CHANNEL = 0x8,
+       WMI_SCAN_EVENT_DEQUEUED        = 0x10,
+       WMI_SCAN_EVENT_PREEMPTED       = 0x20, /* possibly by high-prio scan */
+       WMI_SCAN_EVENT_START_FAILED    = 0x40,
+       WMI_SCAN_EVENT_RESTARTED       = 0x80,
+       WMI_SCAN_EVENT_MAX             = 0x8000
+};
+
+enum wmi_scan_completion_reason {
+       WMI_SCAN_REASON_COMPLETED,
+       WMI_SCAN_REASON_CANCELLED,
+       WMI_SCAN_REASON_PREEMPTED,
+       WMI_SCAN_REASON_TIMEDOUT,
+       WMI_SCAN_REASON_MAX,
+};
+
+struct wmi_scan_event {
+       __le32 event_type; /* %WMI_SCAN_EVENT_ */
+       __le32 reason; /* %WMI_SCAN_REASON_ */
+       __le32 channel_freq; /* only valid for WMI_SCAN_EVENT_FOREIGN_CHANNEL */
+       __le32 scan_req_id;
+       __le32 scan_id;
+       __le32 vdev_id;
+} __packed;
+
+/*
+ * This defines how much headroom is kept in the
+ * receive frame between the descriptor and the
+ * payload, in order for the WMI PHY error and
+ * management handler to insert header contents.
+ *
+ * This is in bytes.
+ */
+#define WMI_MGMT_RX_HDR_HEADROOM    52
+
+/*
+ * This event will be used for sending scan results
+ * as well as rx mgmt frames to the host. The rx buffer
+ * will be sent as part of this WMI event. It would be a
+ * good idea to pass all the fields in the RX status
+ * descriptor up to the host.
+ */
+struct wmi_mgmt_rx_hdr {
+       __le32 channel;
+       __le32 snr;
+       __le32 rate;
+       __le32 phy_mode;
+       __le32 buf_len;
+       __le32 status; /* %WMI_RX_STATUS_ */
+} __packed;
+
+struct wmi_mgmt_rx_event {
+       struct wmi_mgmt_rx_hdr hdr;
+       u8 buf[0];
+} __packed;
+
+#define WMI_RX_STATUS_OK                       0x00
+#define WMI_RX_STATUS_ERR_CRC                  0x01
+#define WMI_RX_STATUS_ERR_DECRYPT              0x08
+#define WMI_RX_STATUS_ERR_MIC                  0x10
+#define WMI_RX_STATUS_ERR_KEY_CACHE_MISS       0x20
+
+struct wmi_single_phyerr_rx_hdr {
+       /* TSF timestamp */
+       __le32 tsf_timestamp;
+
+       /*
+        * Current freq1, freq2
+        *
+        * [7:0]:    freq1[lo]
+        * [15:8] :   freq1[hi]
+        * [23:16]:   freq2[lo]
+        * [31:24]:   freq2[hi]
+        */
+       __le16 freq1;
+       __le16 freq2;
+
+       /*
+        * Combined RSSI over all chains and channel width for this PHY error
+        *
+        * [7:0]: RSSI combined
+        * [15:8]: Channel width (MHz)
+        * [23:16]: PHY error code
+        * [31:24]: reserved (future use)
+        */
+       u8 rssi_combined;
+       u8 chan_width_mhz;
+       u8 phy_err_code;
+       u8 rsvd0;
+
+       /*
+        * RSSI on chain 0 through 3
+        *
+        * This is formatted the same as the PPDU_START RX descriptor
+        * field:
+        *
+        * [7:0]:   pri20
+        * [15:8]:  sec20
+        * [23:16]: sec40
+        * [31:24]: sec80
+        */
+
+       __le32 rssi_chain0;
+       __le32 rssi_chain1;
+       __le32 rssi_chain2;
+       __le32 rssi_chain3;
+
+       /*
+        * Last calibrated NF value for chain 0 through 3
+        *
+        * nf_list_1:
+        *
+        * + [15:0] - chain 0
+        * + [31:16] - chain 1
+        *
+        * nf_list_2:
+        *
+        * + [15:0] - chain 2
+        * + [31:16] - chain 3
+        */
+       __le32 nf_list_1;
+       __le32 nf_list_2;
+
+
+       /* Length of the frame */
+       __le32 buf_len;
+} __packed;
+
+struct wmi_single_phyerr_rx_event {
+       /* Phy error event header */
+       struct wmi_single_phyerr_rx_hdr hdr;
+       /* frame buffer */
+       u8 bufp[0];
+} __packed;
+
+struct wmi_comb_phyerr_rx_hdr {
+       /* number of PHY error events in this buffer */
+       __le32 num_phyerr_events;
+       __le32 tsf_l32;
+       __le32 tsf_u32;
+} __packed;
+
+struct wmi_comb_phyerr_rx_event {
+       /* combined PHY error event header */
+       struct wmi_comb_phyerr_rx_hdr hdr;
+       /*
+        * frame buffer - contains multiple payloads in the order:
+        *                    header - payload, header - payload...
+        *  (The header is of type: wmi_single_phyerr_rx_hdr)
+        */
+       u8 bufp[0];
+} __packed;
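+
+/*
+ * Illustrative sketch (assumption, not a helper added by this commit):
+ * walking the combined PHY error buffer, where each entry is a
+ * wmi_single_phyerr_rx_hdr followed by buf_len bytes of payload.
+ * Bounds checking against the overall event length is omitted here.
+ */
+static inline const u8 *wmi_single_phyerr_next(const u8 *pos)
+{
+       const struct wmi_single_phyerr_rx_hdr *hdr = (const void *)pos;
+
+       return pos + sizeof(*hdr) + __le32_to_cpu(hdr->buf_len);
+}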
+
+struct wmi_mgmt_tx_hdr {
+       __le32 vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+       __le32 tx_rate;
+       __le32 tx_power;
+       __le32 buf_len;
+} __packed;
+
+struct wmi_mgmt_tx_cmd {
+       struct wmi_mgmt_tx_hdr hdr;
+       u8 buf[0];
+} __packed;
+
+struct wmi_echo_event {
+       __le32 value;
+} __packed;
+
+struct wmi_echo_cmd {
+       __le32 value;
+} __packed;
+
+
+struct wmi_pdev_set_regdomain_cmd {
+       __le32 reg_domain;
+       __le32 reg_domain_2G;
+       __le32 reg_domain_5G;
+       __le32 conformance_test_limit_2G;
+       __le32 conformance_test_limit_5G;
+} __packed;
+
+/* Command to set/unset chip in quiet mode */
+struct wmi_pdev_set_quiet_cmd {
+       /* period in TUs */
+       __le32 period;
+
+       /* duration in TUs */
+       __le32 duration;
+
+       /* offset in TUs */
+       __le32 next_start;
+
+       /* enable/disable */
+       __le32 enabled;
+} __packed;
+
+
+/*
+ * 802.11g protection mode.
+ */
+enum ath10k_protmode {
+       ATH10K_PROT_NONE     = 0,    /* no protection */
+       ATH10K_PROT_CTSONLY  = 1,    /* CTS to self */
+       ATH10K_PROT_RTSCTS   = 2,    /* RTS-CTS */
+};
+
+enum wmi_beacon_gen_mode {
+       WMI_BEACON_STAGGERED_MODE = 0,
+       WMI_BEACON_BURST_MODE = 1
+};
+
+enum wmi_csa_event_ies_present_flag {
+       WMI_CSA_IE_PRESENT = 0x00000001,
+       WMI_XCSA_IE_PRESENT = 0x00000002,
+       WMI_WBW_IE_PRESENT = 0x00000004,
+       WMI_CSWARP_IE_PRESENT = 0x00000008,
+};
+
+/* wmi CSA receive event from beacon frame */
+struct wmi_csa_event {
+       __le32 i_fc_dur;
+       /* Bit 0-15: FC */
+       /* Bit 16-31: DUR */
+       struct wmi_mac_addr i_addr1;
+       struct wmi_mac_addr i_addr2;
+       __le32 csa_ie[2];
+       __le32 xcsa_ie[2];
+       __le32 wb_ie[2];
+       __le32 cswarp_ie;
+       __le32 ies_present_flag; /* wmi_csa_event_ies_present_flag */
+} __packed;
+
+/* the definition of different PDEV parameters */
+#define PDEV_DEFAULT_STATS_UPDATE_PERIOD    500
+#define VDEV_DEFAULT_STATS_UPDATE_PERIOD    500
+#define PEER_DEFAULT_STATS_UPDATE_PERIOD    500
+
+enum wmi_pdev_param {
+       /* TX chain mask */
+       WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+       /* RX chain mask */
+       WMI_PDEV_PARAM_RX_CHAIN_MASK,
+       /* TX power limit for 2G Radio */
+       WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
+       /* TX power limit for 5G Radio */
+       WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
+       /* TX power scale */
+       WMI_PDEV_PARAM_TXPOWER_SCALE,
+       /* Beacon generation mode. 0: host, 1: target */
+       WMI_PDEV_PARAM_BEACON_GEN_MODE,
+       /* Beacon transmit mode. 0: staggered, 1: bursted */
+       WMI_PDEV_PARAM_BEACON_TX_MODE,
+       /*
+        * Resource manager off channel mode.
+        * 0: turn off off-channel mode, 1: turn on off-channel mode
+        */
+       WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+       /*
+        * Protection mode:
+        * 0: no protection 1:use CTS-to-self 2: use RTS/CTS
+        */
+       WMI_PDEV_PARAM_PROTECTION_MODE,
+       /* Dynamic bandwidth 0: disable 1: enable */
+       WMI_PDEV_PARAM_DYNAMIC_BW,
+       /* Non-aggregate / 11g sw retry threshold. 0 - disable */
+       WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+       /* Aggregate sw retry threshold. 0 - disable */
+       WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
+       /* Station kickout threshold (number of consecutive failures). 0 - disable */
+       WMI_PDEV_PARAM_STA_KICKOUT_TH,
+       /* Aggregate size scaling configuration per AC */
+       WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+       /* LTR enable */
+       WMI_PDEV_PARAM_LTR_ENABLE,
+       /* LTR latency for BE, in us */
+       WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
+       /* LTR latency for BK, in us */
+       WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
+       /* LTR latency for VI, in us */
+       WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
+       /* LTR latency for VO, in us  */
+       WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
+       /* LTR AC latency timeout, in ms */
+       WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+       /* LTR platform latency override, in us */
+       WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+       /* LTR-RX override, in us */
+       WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
+       /* Tx activity timeout for LTR, in us */
+       WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+       /* L1SS state machine enable */
+       WMI_PDEV_PARAM_L1SS_ENABLE,
+       /* Deep sleep state machine enable */
+       WMI_PDEV_PARAM_DSLEEP_ENABLE,
+       /* RX buffering flush enable */
+       WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+       /* RX buffering watermark */
+       WMI_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+       /* RX buffering timeout enable */
+       WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+       /* RX buffering timeout value */
+       WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+       /* pdev level stats update period in ms */
+       WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+       /* vdev level stats update period in ms */
+       WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+       /* peer level stats update period in ms */
+       WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+       /* beacon filter status update period */
+       WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+       /* QOS Mgmt frame protection MFP/PMF 0: disable, 1: enable */
+       WMI_PDEV_PARAM_PMF_QOS,
+       /* Access category on which ARP frames are sent */
+       WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
+       /* DCS configuration */
+       WMI_PDEV_PARAM_DCS,
+       /* Enable/Disable ANI on target */
+       WMI_PDEV_PARAM_ANI_ENABLE,
+       /* configure the ANI polling period */
+       WMI_PDEV_PARAM_ANI_POLL_PERIOD,
+       /* configure the ANI listening period */
+       WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
+       /* configure OFDM immunity level */
+       WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
+       /* configure CCK immunity level */
+       WMI_PDEV_PARAM_ANI_CCK_LEVEL,
+       /* Enable/Disable CDD for 1x1 STAs in rate control module */
+       WMI_PDEV_PARAM_DYNTXCHAIN,
+       /* Enable/Disable proxy STA */
+       WMI_PDEV_PARAM_PROXY_STA,
+       /* Enable/Disable low power state when all VDEVs are inactive/idle. */
+       WMI_PDEV_PARAM_IDLE_PS_CONFIG,
+       /* Enable/Disable power gating sleep */
+       WMI_PDEV_PARAM_POWER_GATING_SLEEP,
+};
+
+struct wmi_pdev_set_param_cmd {
+       __le32 param_id;
+       __le32 param_value;
+} __packed;
+
+struct wmi_pdev_get_tpc_config_cmd {
+       /* parameter   */
+       __le32 param;
+} __packed;
+
+#define WMI_TPC_RATE_MAX               160
+#define WMI_TPC_TX_N_CHAIN             4
+
+enum wmi_tpc_config_event_flag {
+       WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD     = 0x1,
+       WMI_TPC_CONFIG_EVENT_FLAG_TABLE_STBC    = 0x2,
+       WMI_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF    = 0x4,
+};
+
+struct wmi_pdev_tpc_config_event {
+       __le32 reg_domain;
+       __le32 chan_freq;
+       __le32 phy_mode;
+       __le32 twice_antenna_reduction;
+       __le32 twice_max_rd_power;
+       s32 twice_antenna_gain;
+       __le32 power_limit;
+       __le32 rate_max;
+       __le32 num_tx_chain;
+       __le32 ctl;
+       __le32 flags;
+       s8 max_reg_allow_pow[WMI_TPC_TX_N_CHAIN];
+       s8 max_reg_allow_pow_agcdd[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN];
+       s8 max_reg_allow_pow_agstbc[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN];
+       s8 max_reg_allow_pow_agtxbf[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN];
+       u8 rates_array[WMI_TPC_RATE_MAX];
+} __packed;
+
+/* Transmit power scale factor. */
+enum wmi_tp_scale {
+       WMI_TP_SCALE_MAX    = 0,        /* no scaling (default) */
+       WMI_TP_SCALE_50     = 1,        /* 50% of max (-3 dBm) */
+       WMI_TP_SCALE_25     = 2,        /* 25% of max (-6 dBm) */
+       WMI_TP_SCALE_12     = 3,        /* 12% of max (-9 dBm) */
+       WMI_TP_SCALE_MIN    = 4,        /* min, but still on   */
+       WMI_TP_SCALE_SIZE   = 5,        /* max num of enum     */
+};
+
+struct wmi_set_channel_cmd {
+       /* channel (only frequency and mode info are used) */
+       struct wmi_channel chan;
+} __packed;
+
+struct wmi_pdev_chanlist_update_event {
+       /* number of channels */
+       __le32 num_chan;
+       /* array of channels */
+       struct wmi_channel channel_list[1];
+} __packed;
+
+#define WMI_MAX_DEBUG_MESG (sizeof(u32) * 32)
+
+struct wmi_debug_mesg_event {
+       /* message buffer, NULL terminated */
+       char bufp[WMI_MAX_DEBUG_MESG];
+} __packed;
+
+enum {
+       /* P2P device */
+       VDEV_SUBTYPE_P2PDEV = 0,
+       /* P2P client */
+       VDEV_SUBTYPE_P2PCLI,
+       /* P2P GO */
+       VDEV_SUBTYPE_P2PGO,
+       /* BT3.0 HS */
+       VDEV_SUBTYPE_BT,
+};
+
+struct wmi_pdev_set_channel_cmd {
+       /* ignore power, only use flags, mode and freq */
+       struct wmi_channel chan;
+} __packed;
+
+/* Customize the DSCP (bit) to TID (0-7) mapping for QOS */
+#define WMI_DSCP_MAP_MAX    (64)
+struct wmi_pdev_set_dscp_tid_map_cmd {
+       /* map indicating DSCP to TID conversion */
+       __le32 dscp_to_tid_map[WMI_DSCP_MAP_MAX];
+} __packed;
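+
+/*
+ * Example (illustrative policy, not mandated by this commit): a common
+ * default is to derive the TID from the upper three bits of the DSCP value:
+ *
+ *     for (i = 0; i < WMI_DSCP_MAP_MAX; i++)
+ *             cmd->dscp_to_tid_map[i] = __cpu_to_le32(i >> 3);
+ */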
+
+enum mcast_bcast_rate_id {
+       WMI_SET_MCAST_RATE,
+       WMI_SET_BCAST_RATE
+};
+
+struct mcast_bcast_rate {
+       enum mcast_bcast_rate_id rate_id;
+       __le32 rate;
+} __packed;
+
+struct wmi_wmm_params {
+       __le32 cwmin;
+       __le32 cwmax;
+       __le32 aifs;
+       __le32 txop;
+       __le32 acm;
+       __le32 no_ack;
+} __packed;
+
+struct wmi_pdev_set_wmm_params {
+       struct wmi_wmm_params ac_be;
+       struct wmi_wmm_params ac_bk;
+       struct wmi_wmm_params ac_vi;
+       struct wmi_wmm_params ac_vo;
+} __packed;
+
+struct wmi_wmm_params_arg {
+       u32 cwmin;
+       u32 cwmax;
+       u32 aifs;
+       u32 txop;
+       u32 acm;
+       u32 no_ack;
+};
+
+struct wmi_pdev_set_wmm_params_arg {
+       struct wmi_wmm_params_arg ac_be;
+       struct wmi_wmm_params_arg ac_bk;
+       struct wmi_wmm_params_arg ac_vi;
+       struct wmi_wmm_params_arg ac_vo;
+};
+
+struct wal_dbg_tx_stats {
+       /* Num HTT cookies queued to dispatch list */
+       __le32 comp_queued;
+
+       /* Num HTT cookies dispatched */
+       __le32 comp_delivered;
+
+       /* Num MSDU queued to WAL */
+       __le32 msdu_enqued;
+
+       /* Num MPDU queue to WAL */
+       __le32 mpdu_enqued;
+
+       /* Num MSDUs dropped by WMM limit */
+       __le32 wmm_drop;
+
+       /* Num Local frames queued */
+       __le32 local_enqued;
+
+       /* Num Local frames done */
+       __le32 local_freed;
+
+       /* Num queued to HW */
+       __le32 hw_queued;
+
+       /* Num PPDU reaped from HW */
+       __le32 hw_reaped;
+
+       /* Num underruns */
+       __le32 underrun;
+
+       /* Num PPDUs cleaned up in TX abort */
+       __le32 tx_abort;
+
+       /* Num MPDUs requeued by SW */
+       __le32 mpdus_requed;
+
+       /* excessive retries */
+       __le32 tx_ko;
+
+       /* data hw rate code */
+       __le32 data_rc;
+
+       /* Scheduler self triggers */
+       __le32 self_triggers;
+
+       /* frames dropped due to excessive sw retries */
+       __le32 sw_retry_failure;
+
+       /* illegal rate phy errors  */
+       __le32 illgl_rate_phy_err;
+
+       /* wal pdev continuous xretry */
+       __le32 pdev_cont_xretry;
+
+       /* wal pdev tx timeout */
+       __le32 pdev_tx_timeout;
+
+       /* wal pdev resets  */
+       __le32 pdev_resets;
+
+       __le32 phy_underrun;
+
+       /* MPDU is more than txop limit */
+       __le32 txop_ovf;
+} __packed;
+
+struct wal_dbg_rx_stats {
+       /* Counts any change in ring routing mid-ppdu */
+       __le32 mid_ppdu_route_change;
+
+       /* Total number of statuses processed */
+       __le32 status_rcvd;
+
+       /* Extra frags on rings 0-3 */
+       __le32 r0_frags;
+       __le32 r1_frags;
+       __le32 r2_frags;
+       __le32 r3_frags;
+
+       /* MSDUs / MPDUs delivered to HTT */
+       __le32 htt_msdus;
+       __le32 htt_mpdus;
+
+       /* MSDUs / MPDUs delivered to local stack */
+       __le32 loc_msdus;
+       __le32 loc_mpdus;
+
+       /* AMSDUs that have more MSDUs than the status ring size */
+       __le32 oversize_amsdu;
+
+       /* Number of PHY errors */
+       __le32 phy_errs;
+
+       /* Number of PHY error drops */
+       __le32 phy_err_drop;
+
+       /* Number of mpdu errors - FCS, MIC, ENC etc. */
+       __le32 mpdu_errs;
+} __packed;
+
+struct wal_dbg_peer_stats {
+       /* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */
+       __le32 dummy;
+} __packed;
+
+struct wal_dbg_stats {
+       struct wal_dbg_tx_stats tx;
+       struct wal_dbg_rx_stats rx;
+       struct wal_dbg_peer_stats peer;
+} __packed;
+
+enum wmi_stats_id {
+       WMI_REQUEST_PEER_STAT   = 0x01,
+       WMI_REQUEST_AP_STAT     = 0x02
+};
+
+struct wmi_request_stats_cmd {
+       __le32 stats_id;
+
+       /*
+        * Space to add parameters like
+        * peer mac addr
+        */
+} __packed;
+
+/* Suspend option */
+enum {
+       /* suspend */
+       WMI_PDEV_SUSPEND,
+
+       /* suspend and disable all interrupts */
+       WMI_PDEV_SUSPEND_AND_DISABLE_INTR,
+};
+
+struct wmi_pdev_suspend_cmd {
+       /* suspend option sent to target */
+       __le32 suspend_opt;
+} __packed;
+
+struct wmi_stats_event {
+       __le32 stats_id; /* %WMI_REQUEST_ */
+       /*
+        * number of pdev stats event structures
+        * (wmi_pdev_stats) 0 or 1
+        */
+       __le32 num_pdev_stats;
+       /*
+        * number of vdev stats event structures
+        * (wmi_vdev_stats) 0 or max vdevs
+        */
+       __le32 num_vdev_stats;
+       /*
+        * number of peer stats event structures
+        * (wmi_peer_stats) 0 or max peers
+        */
+       __le32 num_peer_stats;
+       __le32 num_bcnflt_stats;
+       /*
+        * followed by
+        *   num_pdev_stats * sizeof(struct wmi_pdev_stats)
+        *   num_vdev_stats * sizeof(struct wmi_vdev_stats)
+        *   num_peer_stats * sizeof(struct wmi_peer_stats)
+        *
+        *  By having a zero sized array, the pointer to data area
+        *  becomes available without increasing the struct size
+        */
+       u8 data[0];
+} __packed;
+
+/*
+ * PDEV statistics
+ * TODO: add all PDEV stats here
+ */
+struct wmi_pdev_stats {
+       __le32 chan_nf;        /* Channel noise floor */
+       __le32 tx_frame_count; /* TX frame count */
+       __le32 rx_frame_count; /* RX frame count */
+       __le32 rx_clear_count; /* rx clear count */
+       __le32 cycle_count;    /* cycle count */
+       __le32 phy_err_count;  /* Phy error count */
+       __le32 chan_tx_pwr;    /* channel tx power */
+       struct wal_dbg_stats wal; /* WAL dbg stats */
+} __packed;
+
+/*
+ * VDEV statistics
+ * TODO: add all VDEV stats here
+ */
+struct wmi_vdev_stats {
+       __le32 vdev_id;
+} __packed;
+
+/*
+ * peer statistics.
+ * TODO: add more stats
+ */
+struct wmi_peer_stats {
+       struct wmi_mac_addr peer_macaddr;
+       __le32 peer_rssi;
+       __le32 peer_tx_rate;
+} __packed;
+
+struct wmi_vdev_create_cmd {
+       __le32 vdev_id;
+       __le32 vdev_type;
+       __le32 vdev_subtype;
+       struct wmi_mac_addr vdev_macaddr;
+} __packed;
+
+enum wmi_vdev_type {
+       WMI_VDEV_TYPE_AP      = 1,
+       WMI_VDEV_TYPE_STA     = 2,
+       WMI_VDEV_TYPE_IBSS    = 3,
+       WMI_VDEV_TYPE_MONITOR = 4,
+};
+
+enum wmi_vdev_subtype {
+       WMI_VDEV_SUBTYPE_NONE       = 0,
+       WMI_VDEV_SUBTYPE_P2P_DEVICE = 1,
+       WMI_VDEV_SUBTYPE_P2P_CLIENT = 2,
+       WMI_VDEV_SUBTYPE_P2P_GO     = 3,
+};
+
+/* values for vdev_subtype */
+
+/* values for vdev_start_request flags */
+/*
+ * Indicates that the AP VDEV uses a hidden ssid. Only valid for
+ * AP/GO */
+#define WMI_VDEV_START_HIDDEN_SSID  (1<<0)
+/*
+ * Indicates if robust management frame/management frame
+ *  protection is enabled. For GO/AP vdevs, it indicates that
+ *  it may support station/client associations with RMF enabled.
+ *  For STA/client vdevs, it indicates that sta will
+ *  associate with AP with RMF enabled. */
+#define WMI_VDEV_START_PMF_ENABLED  (1<<1)
+
+struct wmi_p2p_noa_descriptor {
+       __le32 type_count; /* 255: continuous schedule, 0: reserved */
+       __le32 duration;  /* Absent period duration in micro seconds */
+       __le32 interval;   /* Absent period interval in micro seconds */
+       __le32 start_time; /* 32 bit tsf time when it starts */
+} __packed;
+
+struct wmi_vdev_start_request_cmd {
+       /* WMI channel */
+       struct wmi_channel chan;
+       /* unique id identifying the VDEV, generated by the caller */
+       __le32 vdev_id;
+       /* requestor id identifying the caller module */
+       __le32 requestor_id;
+       /* beacon interval from received beacon */
+       __le32 beacon_interval;
+       /* DTIM Period from the received beacon */
+       __le32 dtim_period;
+       /* Flags */
+       __le32 flags;
+       /* ssid field. Only valid for AP/GO/IBSS/BTAmp VDEV type. */
+       struct wmi_ssid ssid;
+       /* beacon/probe response xmit rate. Applicable for SoftAP. */
+       __le32 bcn_tx_rate;
+       /* beacon/probe response xmit power. Applicable for SoftAP. */
+       __le32 bcn_tx_power;
+       /* number of p2p NOA descriptor(s) from scan entry */
+       __le32 num_noa_descriptors;
+       /*
+        * Disable H/W ack. This is used by WMI_VDEV_RESTART_REQUEST_CMDID.
+        * During CAC, our HW shouldn't ack detected frames
+        */
+       __le32 disable_hw_ack;
+       /* actual p2p NOA descriptor from scan entry */
+       struct wmi_p2p_noa_descriptor noa_descriptors[2];
+} __packed;
+
+struct wmi_vdev_restart_request_cmd {
+       struct wmi_vdev_start_request_cmd vdev_start_request_cmd;
+} __packed;
+
+struct wmi_vdev_start_request_arg {
+       u32 vdev_id;
+       struct wmi_channel_arg channel;
+       u32 bcn_intval;
+       u32 dtim_period;
+       u8 *ssid;
+       u32 ssid_len;
+       u32 bcn_tx_rate;
+       u32 bcn_tx_power;
+       bool disable_hw_ack;
+       bool hidden_ssid;
+       bool pmf_enabled;
+};
+
+struct wmi_vdev_delete_cmd {
+       /* unique id identifying the VDEV, generated by the caller */
+       __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_up_cmd {
+       __le32 vdev_id;
+       __le32 vdev_assoc_id;
+       struct wmi_mac_addr vdev_bssid;
+} __packed;
+
+struct wmi_vdev_stop_cmd {
+       __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_down_cmd {
+       __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_standby_response_cmd {
+       /* unique id identifying the VDEV, generated by the caller */
+       __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_resume_response_cmd {
+       /* unique id identifying the VDEV, generated by the caller */
+       __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_set_param_cmd {
+       __le32 vdev_id;
+       __le32 param_id;
+       __le32 param_value;
+} __packed;
+
+#define WMI_MAX_KEY_INDEX   3
+#define WMI_MAX_KEY_LEN     32
+
+#define WMI_KEY_PAIRWISE 0x00
+#define WMI_KEY_GROUP    0x01
+#define WMI_KEY_TX_USAGE 0x02 /* default tx key - static wep */
+
+struct wmi_key_seq_counter {
+       __le32 key_seq_counter_l;
+       __le32 key_seq_counter_h;
+} __packed;
+
+#define WMI_CIPHER_NONE     0x0 /* clear key */
+#define WMI_CIPHER_WEP      0x1
+#define WMI_CIPHER_TKIP     0x2
+#define WMI_CIPHER_AES_OCB  0x3
+#define WMI_CIPHER_AES_CCM  0x4
+#define WMI_CIPHER_WAPI     0x5
+#define WMI_CIPHER_CKIP     0x6
+#define WMI_CIPHER_AES_CMAC 0x7
+
+struct wmi_vdev_install_key_cmd {
+       __le32 vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+       __le32 key_idx;
+       __le32 key_flags;
+       __le32 key_cipher; /* %WMI_CIPHER_ */
+       struct wmi_key_seq_counter key_rsc_counter;
+       struct wmi_key_seq_counter key_global_rsc_counter;
+       struct wmi_key_seq_counter key_tsc_counter;
+       u8 wpi_key_rsc_counter[16];
+       u8 wpi_key_tsc_counter[16];
+       __le32 key_len;
+       __le32 key_txmic_len;
+       __le32 key_rxmic_len;
+
+       /* contains key followed by tx mic followed by rx mic */
+       u8 key_data[0];
+} __packed;
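+
+/*
+ * Illustrative note (assumption, not text from this commit): since key_data
+ * is a variable-length tail, a host would typically allocate the command as
+ *
+ *     len = sizeof(struct wmi_vdev_install_key_cmd) +
+ *           roundup(key_len, sizeof(__le32));
+ *
+ * and copy the key (and any tx/rx MIC) into key_data.
+ */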
+
+struct wmi_vdev_install_key_arg {
+       u32 vdev_id;
+       const u8 *macaddr;
+       u32 key_idx;
+       u32 key_flags;
+       u32 key_cipher;
+       u32 key_len;
+       u32 key_txmic_len;
+       u32 key_rxmic_len;
+       const void *key_data;
+};
+
+/* Preamble types to be used with VDEV fixed rate configuration */
+enum wmi_rate_preamble {
+       WMI_RATE_PREAMBLE_OFDM,
+       WMI_RATE_PREAMBLE_CCK,
+       WMI_RATE_PREAMBLE_HT,
+       WMI_RATE_PREAMBLE_VHT,
+};
+
+/* Value to disable fixed rate setting */
+#define WMI_FIXED_RATE_NONE    (0xff)
+
+/* the definition of different VDEV parameters */
+enum wmi_vdev_param {
+       /* RTS Threshold */
+       WMI_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+       /* Fragmentation threshold */
+       WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+       /* beacon interval in TUs */
+       WMI_VDEV_PARAM_BEACON_INTERVAL,
+       /* Listen interval in TUs */
+       WMI_VDEV_PARAM_LISTEN_INTERVAL,
+       /* multicast rate in Mbps */
+       WMI_VDEV_PARAM_MULTICAST_RATE,
+       /* management frame rate in Mbps */
+       WMI_VDEV_PARAM_MGMT_TX_RATE,
+       /* slot time (long vs short) */
+       WMI_VDEV_PARAM_SLOT_TIME,
+       /* preamble (long vs short) */
+       WMI_VDEV_PARAM_PREAMBLE,
+       /* SWBA time (time before tbtt in msec) */
+       WMI_VDEV_PARAM_SWBA_TIME,
+       /* time period for updating VDEV stats */
+       WMI_VDEV_STATS_UPDATE_PERIOD,
+       /* age out time in msec for frames queued for station in power save */
+       WMI_VDEV_PWRSAVE_AGEOUT_TIME,
+       /*
+        * Host SWBA interval (time in msec before tbtt for SWBA event
+        * generation).
+        */
+       WMI_VDEV_HOST_SWBA_INTERVAL,
+       /* DTIM period (specified in units of num beacon intervals) */
+       WMI_VDEV_PARAM_DTIM_PERIOD,
+       /*
+        * scheduler air time limit for this VDEV. Used by off chan
+        * scheduler.
+        */
+       WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+       /* enable/disable WDS for this VDEV */
+       WMI_VDEV_PARAM_WDS,
+       /* ATIM Window */
+       WMI_VDEV_PARAM_ATIM_WINDOW,
+       /* BMISS max */
+       WMI_VDEV_PARAM_BMISS_COUNT_MAX,
+       /* BMISS first time */
+       WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
+       /* BMISS final time */
+       WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
+       /* WMM enable/disable */
+       WMI_VDEV_PARAM_FEATURE_WMM,
+       /* Channel width */
+       WMI_VDEV_PARAM_CHWIDTH,
+       /* Channel Offset */
+       WMI_VDEV_PARAM_CHEXTOFFSET,
+       /* Disable HT Protection */
+       WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
+       /* Quick STA Kickout */
+       WMI_VDEV_PARAM_STA_QUICKKICKOUT,
+       /* Rate to be used with Management frames */
+       WMI_VDEV_PARAM_MGMT_RATE,
+       /* Protection Mode */
+       WMI_VDEV_PARAM_PROTECTION_MODE,
+       /* Fixed rate setting */
+       WMI_VDEV_PARAM_FIXED_RATE,
+       /* Short GI Enable/Disable */
+       WMI_VDEV_PARAM_SGI,
+       /* Enable LDPC */
+       WMI_VDEV_PARAM_LDPC,
+       /* Enable Tx STBC */
+       WMI_VDEV_PARAM_TX_STBC,
+       /* Enable Rx STBC */
+       WMI_VDEV_PARAM_RX_STBC,
+       /* Intra BSS forwarding  */
+       WMI_VDEV_PARAM_INTRA_BSS_FWD,
+       /* Setting Default xmit key for Vdev */
+       WMI_VDEV_PARAM_DEF_KEYID,
+       /* NSS width */
+       WMI_VDEV_PARAM_NSS,
+       /* Set the custom rate for the broadcast data frames */
+       WMI_VDEV_PARAM_BCAST_DATA_RATE,
+       /* Set the custom rate (rate-code) for multicast data frames */
+       WMI_VDEV_PARAM_MCAST_DATA_RATE,
+       /* Tx multicast packet indicate Enable/Disable */
+       WMI_VDEV_PARAM_MCAST_INDICATE,
+       /* Tx DHCP packet indicate Enable/Disable */
+       WMI_VDEV_PARAM_DHCP_INDICATE,
+       /* Enable host inspection of Tx unicast packet to unknown destination */
+       WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+
+       /* The minimum idle time before the AP begins to consider the STA inactive */
+       WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+
+       /*
+        * An associated STA is considered inactive when there is no recent
+        * TX/RX activity and no downlink frames are buffered for it. Once a
+        * STA exceeds the maximum idle inactive time, the AP will send an
+        * 802.11 data-null as a keep alive to verify the STA is still
+        * associated. If the STA does not ACK the data-null, or if the
+        * data-null is buffered and the STA does not retrieve it, the STA
+        * will be considered unresponsive
+        * (see WMI_VDEV_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS).
+        */
+       WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+
+       /*
+        * An associated STA is considered unresponsive if there is no recent
+        * TX/RX activity and downlink frames are buffered for it. Once a STA
+        * exceeds the maximum unresponsive time, the AP will send a
+        * WMI_STA_KICKOUT event to the host so the STA can be deleted. */
+       WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+
+       /* Enable NAWDS : MCAST INSPECT Enable, NAWDS Flag set */
+       WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
+       /* Enable/Disable RTS-CTS */
+       WMI_VDEV_PARAM_ENABLE_RTSCTS,
+       /* Enable TX beamformee/beamformer (TXBF) */
+       WMI_VDEV_PARAM_TXBF,
+
+       /* Set packet power save */
+       WMI_VDEV_PARAM_PACKET_POWERSAVE,
+
+       /*
+        * Drops unencrypted packets if received in an encrypted connection,
+        * otherwise forwards them to the host.
+        */
+       WMI_VDEV_PARAM_DROP_UNENCRY,
+
+       /*
+        * Set the encapsulation type for frames.
+        */
+       WMI_VDEV_PARAM_TX_ENCAP_TYPE,
+};
+
+/* slot time long */
+#define WMI_VDEV_SLOT_TIME_LONG                0x1
+/* slot time short */
+#define WMI_VDEV_SLOT_TIME_SHORT       0x2
+/* preamble long */
+#define WMI_VDEV_PREAMBLE_LONG         0x1
+/* preamble short */
+#define WMI_VDEV_PREAMBLE_SHORT                0x2
+
+enum wmi_start_event_param {
+       WMI_VDEV_RESP_START_EVENT = 0,
+       WMI_VDEV_RESP_RESTART_EVENT,
+};
+
+struct wmi_vdev_start_response_event {
+       __le32 vdev_id;
+       __le32 req_id;
+       __le32 resp_type; /* %WMI_VDEV_RESP_ */
+       __le32 status;
+} __packed;
+
+struct wmi_vdev_standby_req_event {
+       /* unique id identifying the VDEV, generated by the caller */
+       __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_resume_req_event {
+       /* unique id identifying the VDEV, generated by the caller */
+       __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_stopped_event {
+       /* unique id identifying the VDEV, generated by the caller */
+       __le32 vdev_id;
+} __packed;
+
+/*
+ * common structure used for simple events
+ * (stopped, resume_req, standby response)
+ */
+struct wmi_vdev_simple_event {
+       /* unique id identifying the VDEV, generated by the caller */
+       __le32 vdev_id;
+} __packed;
+
+/* VDEV start response status codes */
+/* VDEV successfully started */
+#define WMI_INIFIED_VDEV_START_RESPONSE_STATUS_SUCCESS 0x0
+
+/* requested VDEV not found */
+#define WMI_INIFIED_VDEV_START_RESPONSE_INVALID_VDEVID 0x1
+
+/* unsupported VDEV combination */
+#define WMI_INIFIED_VDEV_START_RESPONSE_NOT_SUPPORTED  0x2
+
+/* Beacon processing related command and event structures */
+struct wmi_bcn_tx_hdr {
+       __le32 vdev_id;
+       __le32 tx_rate;
+       __le32 tx_power;
+       __le32 bcn_len;
+} __packed;
+
+struct wmi_bcn_tx_cmd {
+       struct wmi_bcn_tx_hdr hdr;
+       u8 *bcn[0];
+} __packed;
+
+struct wmi_bcn_tx_arg {
+       u32 vdev_id;
+       u32 tx_rate;
+       u32 tx_power;
+       u32 bcn_len;
+       const void *bcn;
+};
+
+/* Beacon filter */
+#define WMI_BCN_FILTER_ALL   0 /* Filter all beacons */
+#define WMI_BCN_FILTER_NONE  1 /* Pass all beacons */
+#define WMI_BCN_FILTER_RSSI  2 /* Pass Beacons RSSI >= RSSI threshold */
+#define WMI_BCN_FILTER_BSSID 3 /* Pass Beacons with matching BSSID */
+#define WMI_BCN_FILTER_SSID  4 /* Pass Beacons with matching SSID */
+
+struct wmi_bcn_filter_rx_cmd {
+       /* Filter ID */
+       __le32 bcn_filter_id;
+       /* Filter type - wmi_bcn_filter */
+       __le32 bcn_filter;
+       /* Buffer len */
+       __le32 bcn_filter_len;
+       /* Filter info (threshold, BSSID, RSSI) */
+       u8 *bcn_filter_buf;
+} __packed;
+
+/* Capabilities and IEs to be passed to firmware */
+struct wmi_bcn_prb_info {
+       /* Capabilities */
+       __le32 caps;
+       /* ERP info */
+       __le32 erp;
+       /* Advanced capabilities */
+       /* HT capabilities */
+       /* HT Info */
+       /* ibss_dfs */
+       /* wpa Info */
+       /* rsn Info */
+       /* rrm info */
+       /* ath_ext */
+       /* app IE */
+} __packed;
+
+struct wmi_bcn_tmpl_cmd {
+       /* unique id identifying the VDEV, generated by the caller */
+       __le32 vdev_id;
+       /* TIM IE offset from the beginning of the template. */
+       __le32 tim_ie_offset;
+       /* beacon probe capabilities and IEs */
+       struct wmi_bcn_prb_info bcn_prb_info;
+       /* beacon buffer length */
+       __le32 buf_len;
+       /* variable length data */
+       u8 data[1];
+} __packed;
+
+struct wmi_prb_tmpl_cmd {
+       /* unique id identifying the VDEV, generated by the caller */
+       __le32 vdev_id;
+       /* beacon probe capabilities and IEs */
+       struct wmi_bcn_prb_info bcn_prb_info;
+       /* beacon buffer length */
+       __le32 buf_len;
+       /* Variable length data */
+       u8 data[1];
+} __packed;
+
+enum wmi_sta_ps_mode {
+       /* disable power save for the given STA VDEV */
+       WMI_STA_PS_MODE_DISABLED = 0,
+       /* enable power save for a given STA VDEV */
+       WMI_STA_PS_MODE_ENABLED = 1,
+};
+
+struct wmi_sta_powersave_mode_cmd {
+       /* unique id identifying the VDEV, generated by the caller */
+       __le32 vdev_id;
+
+       /*
+        * Power save mode
+        * (see enum wmi_sta_ps_mode)
+        */
+       __le32 sta_ps_mode;
+} __packed;
+
+enum wmi_csa_offload_en {
+       WMI_CSA_OFFLOAD_DISABLE = 0,
+       WMI_CSA_OFFLOAD_ENABLE = 1,
+};
+
+struct wmi_csa_offload_enable_cmd {
+       __le32 vdev_id;
+       __le32 csa_offload_enable;
+} __packed;
+
+struct wmi_csa_offload_chanswitch_cmd {
+       __le32 vdev_id;
+       struct wmi_channel chan;
+} __packed;
+
+/*
+ * This parameter controls the policy for retrieving frames from AP while the
+ * STA is in sleep state.
+ *
+ * Only takes effect if the sta_ps_mode is enabled
+ */
+enum wmi_sta_ps_param_rx_wake_policy {
+       /*
+        * Wake up whenever there is RX activity on the VDEV. In this mode
+        * the power save SM (state machine) will come out of sleep by either
+        * sending a null frame or a data frame (with PS==0) in response to the
+        * TIM bit set in the received beacon frame from the AP.
+        */
+       WMI_STA_PS_RX_WAKE_POLICY_WAKE = 0,
+
+       /*
+        * Here the power save state machine will not wake up in response to the
+        * TIM bit; instead it will send a PS-Poll or a UAPSD trigger based on the
+        * UAPSD configuration set up by the WMISET_PS_SET_UAPSD WMI command. When all
+        * access categories are delivery-enabled, the station will send a
+        * UAPSD trigger frame, otherwise it will send a PS-Poll.
+        */
+       WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD = 1,
+};
+
+/*
+ * Number of tx frames/beacons that cause the power save SM to wake up.
+ *
+ * Value 1 causes the SM to wake up for every TX. Value 0 has a special
+ * meaning: it causes the SM to never wake up. This is useful if you want
+ * to keep the system asleep all the time for some kind of test mode. The host
+ * can change this parameter at any time; it takes effect at the next tx frame.
+ */
+enum wmi_sta_ps_param_tx_wake_threshold {
+       WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER = 0,
+       WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS = 1,
+
+       /*
+        * Values greater than one indicate the number of TX attempts per
+        * beacon interval before the STA will wake up
+        */
+};
+
+/*
+ * The maximum number of PS-Poll frames the FW will send in response to
+ * traffic advertised in TIM before waking up (by sending a null frame with PS
+ * = 0). Value 0 has a special meaning: there is no maximum count and the FW
+ * will send as many PS-Polls as are necessary to retrieve buffered BUs. This
+ * parameter is used when the RX wake policy is
+ * WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD and ignored when the RX wake
+ * policy is WMI_STA_PS_RX_WAKE_POLICY_WAKE.
+ */
+enum wmi_sta_ps_param_pspoll_count {
+       WMI_STA_PS_PSPOLL_COUNT_NO_MAX = 0,
+       /*
+        * Values greater than 0 indicate the maximum number of PS-Poll frames
+        * FW will send before waking up.
+        */
+};
+
+/*
+ * This will include the delivery and trigger enabled state for every AC.
+ * This is the negotiated state with the AP. The host MLME needs to set this
+ * based on AP capability and the state set in the association request by the
+ * station MLME. The lower 8 bits of the value specify the UAPSD configuration.
+ */
+#define WMI_UAPSD_AC_TYPE_DELI 0
+#define WMI_UAPSD_AC_TYPE_TRIG 1
+
+#define WMI_UAPSD_AC_BIT_MASK(ac, type) \
+       ((type ==  WMI_UAPSD_AC_TYPE_DELI) ? (1<<(ac<<1)) : (1<<((ac<<1)+1)))
+
+enum wmi_sta_ps_param_uapsd {
+       WMI_STA_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
+       WMI_STA_PS_UAPSD_AC0_TRIGGER_EN  = (1 << 1),
+       WMI_STA_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2),
+       WMI_STA_PS_UAPSD_AC1_TRIGGER_EN  = (1 << 3),
+       WMI_STA_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4),
+       WMI_STA_PS_UAPSD_AC2_TRIGGER_EN  = (1 << 5),
+       WMI_STA_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6),
+       WMI_STA_PS_UAPSD_AC3_TRIGGER_EN  = (1 << 7),
+};
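+
+/*
+ * Example (for illustration): WMI_UAPSD_AC_BIT_MASK(2, WMI_UAPSD_AC_TYPE_TRIG)
+ * evaluates to (1 << 5), i.e. the same bit as WMI_STA_PS_UAPSD_AC2_TRIGGER_EN,
+ * while WMI_UAPSD_AC_BIT_MASK(2, WMI_UAPSD_AC_TYPE_DELI) gives (1 << 4).
+ */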
+
+enum wmi_sta_powersave_param {
+       /*
+        * Controls how frames are retrieved from the AP while the STA is sleeping
+        *
+        * (see enum wmi_sta_ps_param_rx_wake_policy)
+        */
+       WMI_STA_PS_PARAM_RX_WAKE_POLICY = 0,
+
+       /*
+        * The STA will go active after this many TX
+        *
+        * (see enum wmi_sta_ps_param_tx_wake_threshold)
+        */
+       WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD = 1,
+
+       /*
+        * Number of PS-Poll to send before STA wakes up
+        *
+        * (see enum wmi_sta_ps_param_pspoll_count)
+        *
+        */
+       WMI_STA_PS_PARAM_PSPOLL_COUNT = 2,
+
+       /*
+        * TX/RX inactivity time in msec before going to sleep.
+        *
+        * The power save SM will monitor tx/rx activity on the VDEV; if there
+        * is no activity for the number of msec specified by this parameter,
+        * the power save SM will go to sleep.
+        */
+       WMI_STA_PS_PARAM_INACTIVITY_TIME = 3,
+
+       /*
+        * Set uapsd configuration.
+        *
+        * (see enum wmi_sta_ps_param_uapsd)
+        */
+       WMI_STA_PS_PARAM_UAPSD = 4,
+};
+
+struct wmi_sta_powersave_param_cmd {
+       __le32 vdev_id;
+       __le32 param_id; /* %WMI_STA_PS_PARAM_ */
+       __le32 param_value;
+} __packed;
+
+/* No MIMO power save */
+#define WMI_STA_MIMO_PS_MODE_DISABLE
+/* mimo powersave mode static */
+#define WMI_STA_MIMO_PS_MODE_STATIC
+/* mimo powersave mode dynamic */
+#define WMI_STA_MIMO_PS_MODE_DYNAMIC
+
+struct wmi_sta_mimo_ps_mode_cmd {
+       /* unique id identifying the VDEV, generated by the caller */
+       __le32 vdev_id;
+       /* mimo powersave mode as defined above */
+       __le32 mimo_pwrsave_mode;
+} __packed;
+
+/* U-APSD configuration of peer station from (re)assoc request and TSPECs */
+enum wmi_ap_ps_param_uapsd {
+       WMI_AP_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
+       WMI_AP_PS_UAPSD_AC0_TRIGGER_EN  = (1 << 1),
+       WMI_AP_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2),
+       WMI_AP_PS_UAPSD_AC1_TRIGGER_EN  = (1 << 3),
+       WMI_AP_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4),
+       WMI_AP_PS_UAPSD_AC2_TRIGGER_EN  = (1 << 5),
+       WMI_AP_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6),
+       WMI_AP_PS_UAPSD_AC3_TRIGGER_EN  = (1 << 7),
+};
+
+/* U-APSD maximum service period of peer station */
+enum wmi_ap_ps_peer_param_max_sp {
+       WMI_AP_PS_PEER_PARAM_MAX_SP_UNLIMITED = 0,
+       WMI_AP_PS_PEER_PARAM_MAX_SP_2 = 1,
+       WMI_AP_PS_PEER_PARAM_MAX_SP_4 = 2,
+       WMI_AP_PS_PEER_PARAM_MAX_SP_6 = 3,
+       MAX_WMI_AP_PS_PEER_PARAM_MAX_SP,
+};
+
+/*
+ * AP power save parameter
+ * Set a power save specific parameter for a peer station
+ */
+enum wmi_ap_ps_peer_param {
+       /* Set uapsd configuration for a given peer.
+        *
+        * Include the delivery and trigger enabled state for every AC.
+        * The host MLME needs to set this based on AP capability and the
+        * station's request set in the association request received from the station.
+        *
+        * Lower 8 bits of the value specify the UAPSD configuration.
+        *
+        * (see enum wmi_ap_ps_param_uapsd)
+        * The default value is 0.
+        */
+       WMI_AP_PS_PEER_PARAM_UAPSD = 0,
+
+       /*
+        * Set the service period for a UAPSD capable station
+        *
+        * The service period from the WME IE in the (re)assoc request frame.
+        *
+        * (see enum wmi_ap_ps_peer_param_max_sp)
+        */
+       WMI_AP_PS_PEER_PARAM_MAX_SP = 1,
+
+       /* Time in seconds for aging out buffered frames for STA in PS */
+       WMI_AP_PS_PEER_PARAM_AGEOUT_TIME = 2,
+};
+
+struct wmi_ap_ps_peer_cmd {
+       /* unique id identifying the VDEV, generated by the caller */
+       __le32 vdev_id;
+
+       /* peer MAC address */
+       struct wmi_mac_addr peer_macaddr;
+
+       /* AP powersave param (see enum wmi_ap_ps_peer_param) */
+       __le32 param_id;
+
+       /* AP powersave param value */
+       __le32 param_value;
+} __packed;
+
+/* 128 clients = 4 words */
+#define WMI_TIM_BITMAP_ARRAY_SIZE 4
+
+struct wmi_tim_info {
+       __le32 tim_len;
+       __le32 tim_mcast;
+       __le32 tim_bitmap[WMI_TIM_BITMAP_ARRAY_SIZE];
+       __le32 tim_changed;
+       __le32 tim_num_ps_pending;
+} __packed;
+
+/* Maximum number of NOA Descriptors supported */
+#define WMI_P2P_MAX_NOA_DESCRIPTORS 4
+#define WMI_P2P_OPPPS_ENABLE_BIT       BIT(0)
+#define WMI_P2P_OPPPS_CTWINDOW_OFFSET  1
+#define WMI_P2P_NOA_CHANGED_BIT        BIT(0)
+
+struct wmi_p2p_noa_info {
+       /* Bit 0 - Flag to indicate an update in NOA schedule
+          Bits 7-1 - Reserved */
+       u8 changed;
+       /* NOA index */
+       u8 index;
+       /* Bit 0 - Opp PS state of the AP
+          Bits 1-7 - Ctwindow in TUs */
+       u8 ctwindow_oppps;
+       /* Number of NOA descriptors */
+       u8 num_descriptors;
+
+       struct wmi_p2p_noa_descriptor descriptors[WMI_P2P_MAX_NOA_DESCRIPTORS];
+} __packed;
+
+struct wmi_bcn_info {
+       struct wmi_tim_info tim_info;
+       struct wmi_p2p_noa_info p2p_noa_info;
+} __packed;
+
+struct wmi_host_swba_event {
+       __le32 vdev_map;
+       struct wmi_bcn_info bcn_info[1];
+} __packed;
+
+#define WMI_MAX_AP_VDEV 16
+
+struct wmi_tbtt_offset_event {
+       __le32 vdev_map;
+       __le32 tbttoffset_list[WMI_MAX_AP_VDEV];
+} __packed;
+
+
+struct wmi_peer_create_cmd {
+       __le32 vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_peer_delete_cmd {
+       __le32 vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_peer_flush_tids_cmd {
+       __le32 vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+       __le32 peer_tid_bitmap;
+} __packed;
+
+struct wmi_fixed_rate {
+       /*
+        * rate mode. 0: disable fixed rate (auto rate)
+        *   1: legacy (non 11n) rate specified as ieee rate 2*Mbps
+        *   2: ht20 11n rate specified as mcs index
+        *   3: ht40 11n rate specified as mcs index
+        */
+       __le32  rate_mode;
+       /*
+        * 4 rate values for 4 rate series. series 0 is stored in byte 0 (LSB)
+        * and series 3 is stored at byte 3 (MSB)
+        */
+       __le32  rate_series;
+       /*
+        * 4 retry counts for 4 rate series. retry count for rate 0 is stored
+        * in byte 0 (LSB) and retry count for rate 3 is stored at byte 3
+        * (MSB)
+        */
+       __le32  rate_retries;
+} __packed;
+
+struct wmi_peer_fixed_rate_cmd {
+       /* unique id identifying the VDEV, generated by the caller */
+       __le32 vdev_id;
+       /* peer MAC address */
+       struct wmi_mac_addr peer_macaddr;
+       /* fixed rate */
+       struct wmi_fixed_rate peer_fixed_rate;
+} __packed;
+
+#define WMI_MGMT_TID    17
+
+struct wmi_addba_clear_resp_cmd {
+       /* unique id identifying the VDEV, generated by the caller */
+       __le32 vdev_id;
+       /* peer MAC address */
+       struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_addba_send_cmd {
+       /* unique id identifying the VDEV, generated by the caller */
+       __le32 vdev_id;
+       /* peer MAC address */
+       struct wmi_mac_addr peer_macaddr;
+       /* Tid number */
+       __le32 tid;
+       /* Buffer/Window size*/
+       __le32 buffersize;
+} __packed;
+
+struct wmi_delba_send_cmd {
+       /* unique id identifying the VDEV, generated by the caller */
+       __le32 vdev_id;
+       /* peer MAC address */
+       struct wmi_mac_addr peer_macaddr;
+       /* Tid number */
+       __le32 tid;
+       /* Is Initiator */
+       __le32 initiator;
+       /* Reason code */
+       __le32 reasoncode;
+} __packed;
+
+struct wmi_addba_setresponse_cmd {
+       /* unique id identifying the vdev, generated by the caller */
+       __le32 vdev_id;
+       /* peer mac address */
+       struct wmi_mac_addr peer_macaddr;
+       /* Tid number */
+       __le32 tid;
+       /* status code */
+       __le32 statuscode;
+} __packed;
+
+struct wmi_send_singleamsdu_cmd {
+       /* unique id identifying the vdev, generated by the caller */
+       __le32 vdev_id;
+       /* peer mac address */
+       struct wmi_mac_addr peer_macaddr;
+       /* Tid number */
+       __le32 tid;
+} __packed;
+
+enum wmi_peer_smps_state {
+       WMI_PEER_SMPS_PS_NONE = 0x0,
+       WMI_PEER_SMPS_STATIC  = 0x1,
+       WMI_PEER_SMPS_DYNAMIC = 0x2
+};
+
+enum wmi_peer_param {
+       WMI_PEER_SMPS_STATE = 0x1, /* see %wmi_peer_smps_state */
+       WMI_PEER_AMPDU      = 0x2,
+       WMI_PEER_AUTHORIZE  = 0x3,
+       WMI_PEER_CHAN_WIDTH = 0x4,
+       WMI_PEER_NSS        = 0x5,
+       WMI_PEER_USE_4ADDR  = 0x6
+};
+
+struct wmi_peer_set_param_cmd {
+       __le32 vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+       __le32 param_id;
+       __le32 param_value;
+} __packed;
+
+#define MAX_SUPPORTED_RATES 128
+
+struct wmi_rate_set {
+       /* total number of rates */
+       __le32 num_rates;
+       /*
+        * rates (each 8bit value) packed into a 32 bit word.
+        * the rates are filled from least significant byte to most
+        * significant byte.
+        */
+       __le32 rates[(MAX_SUPPORTED_RATES/4)+1];
+} __packed;
+
+struct wmi_rate_set_arg {
+       unsigned int num_rates;
+       u8 rates[MAX_SUPPORTED_RATES];
+};
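+
+/*
+ * Illustrative sketch (not a helper introduced by this commit): packing a
+ * wmi_rate_set_arg into the LSB-first byte layout described above. The
+ * caller is assumed to cap num_rates at MAX_SUPPORTED_RATES.
+ */
+static inline void wmi_rate_set_pack(struct wmi_rate_set *rs,
+                                    const struct wmi_rate_set_arg *arg)
+{
+       unsigned int i;
+       u32 word;
+
+       memset(rs, 0, sizeof(*rs));
+       rs->num_rates = __cpu_to_le32(arg->num_rates);
+       for (i = 0; i < arg->num_rates; i++) {
+               word = __le32_to_cpu(rs->rates[i / 4]);
+               word |= (u32)arg->rates[i] << (8 * (i % 4));
+               rs->rates[i / 4] = __cpu_to_le32(word);
+       }
+}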
+
+/*
+ * NOTE: It would be a good idea to represent the Tx MCS
+ * info in one word and Rx in another word. This is split
+ * into multiple words for convenience
+ */
+struct wmi_vht_rate_set {
+       __le32 rx_max_rate; /* Max Rx data rate */
+       __le32 rx_mcs_set;  /* Negotiated RX VHT rates */
+       __le32 tx_max_rate; /* Max Tx data rate */
+       __le32 tx_mcs_set;  /* Negotiated TX VHT rates */
+} __packed;
+
+struct wmi_vht_rate_set_arg {
+       u32 rx_max_rate;
+       u32 rx_mcs_set;
+       u32 tx_max_rate;
+       u32 tx_mcs_set;
+};
+
+struct wmi_peer_set_rates_cmd {
+       /* peer MAC address */
+       struct wmi_mac_addr peer_macaddr;
+       /* legacy rate set */
+       struct wmi_rate_set peer_legacy_rates;
+       /* ht rate set */
+       struct wmi_rate_set peer_ht_rates;
+} __packed;
+
+struct wmi_peer_set_q_empty_callback_cmd {
+       /* unique id identifying the VDEV, generated by the caller */
+       __le32 vdev_id;
+       /* peer MAC address */
+       struct wmi_mac_addr peer_macaddr;
+       __le32 callback_enable;
+} __packed;
+
+#define WMI_PEER_AUTH           0x00000001
+#define WMI_PEER_QOS            0x00000002
+#define WMI_PEER_NEED_PTK_4_WAY 0x00000004
+#define WMI_PEER_NEED_GTK_2_WAY 0x00000010
+#define WMI_PEER_APSD           0x00000800
+#define WMI_PEER_HT             0x00001000
+#define WMI_PEER_40MHZ          0x00002000
+#define WMI_PEER_STBC           0x00008000
+#define WMI_PEER_LDPC           0x00010000
+#define WMI_PEER_DYN_MIMOPS     0x00020000
+#define WMI_PEER_STATIC_MIMOPS  0x00040000
+#define WMI_PEER_SPATIAL_MUX    0x00200000
+#define WMI_PEER_VHT            0x02000000
+#define WMI_PEER_80MHZ          0x04000000
+#define WMI_PEER_PMF            0x08000000
+
+/*
+ * Peer rate capabilities.
+ *
+ * This is of interest to the ratecontrol
+ * module which resides in the firmware. The bit definitions are
+ * consistent with those defined in if_athrate.c.
+ */
+#define WMI_RC_DS_FLAG          0x01
+#define WMI_RC_CW40_FLAG        0x02
+#define WMI_RC_SGI_FLAG         0x04
+#define WMI_RC_HT_FLAG          0x08
+#define WMI_RC_RTSCTS_FLAG      0x10
+#define WMI_RC_TX_STBC_FLAG     0x20
+#define WMI_RC_RX_STBC_FLAG     0xC0
+#define WMI_RC_RX_STBC_FLAG_S   6
+#define WMI_RC_WEP_TKIP_FLAG    0x100
+#define WMI_RC_TS_FLAG          0x200
+#define WMI_RC_UAPSD_FLAG       0x400
+
+/* Maximum listen interval supported by hw in units of beacon interval */
+#define ATH10K_MAX_HW_LISTEN_INTERVAL 5
+
+struct wmi_peer_assoc_complete_cmd {
+       struct wmi_mac_addr peer_macaddr;
+       __le32 vdev_id;
+       __le32 peer_new_assoc; /* 1=assoc, 0=reassoc */
+       __le32 peer_associd; /* 16 LSBs */
+       __le32 peer_flags;
+       __le32 peer_caps; /* 16 LSBs */
+       __le32 peer_listen_intval;
+       __le32 peer_ht_caps;
+       __le32 peer_max_mpdu;
+       __le32 peer_mpdu_density; /* 0..16 */
+       __le32 peer_rate_caps;
+       struct wmi_rate_set peer_legacy_rates;
+       struct wmi_rate_set peer_ht_rates;
+       __le32 peer_nss; /* num of spatial streams */
+       __le32 peer_vht_caps;
+       __le32 peer_phymode;
+       struct wmi_vht_rate_set peer_vht_rates;
+       /* HT Operation Element of the peer. Five bytes packed into a
+        * two-word INT32 array and filled from lsb to msb. */
+       __le32 peer_ht_info[2];
+} __packed;
+
+struct wmi_peer_assoc_complete_arg {
+       u8 addr[ETH_ALEN];
+       u32 vdev_id;
+       bool peer_reassoc;
+       u16 peer_aid;
+       u32 peer_flags; /* see %WMI_PEER_ */
+       u16 peer_caps;
+       u32 peer_listen_intval;
+       u32 peer_ht_caps;
+       u32 peer_max_mpdu;
+       u32 peer_mpdu_density; /* 0..16 */
+       u32 peer_rate_caps; /* see %WMI_RC_ */
+       struct wmi_rate_set_arg peer_legacy_rates;
+       struct wmi_rate_set_arg peer_ht_rates;
+       u32 peer_num_spatial_streams;
+       u32 peer_vht_caps;
+       enum wmi_phy_mode peer_phymode;
+       struct wmi_vht_rate_set_arg peer_vht_rates;
+};
+
+struct wmi_peer_add_wds_entry_cmd {
+       /* peer MAC address */
+       struct wmi_mac_addr peer_macaddr;
+       /* wds MAC addr */
+       struct wmi_mac_addr wds_macaddr;
+} __packed;
+
+struct wmi_peer_remove_wds_entry_cmd {
+       /* wds MAC addr */
+       struct wmi_mac_addr wds_macaddr;
+} __packed;
+
+struct wmi_peer_q_empty_callback_event {
+       /* peer MAC address */
+       struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+/*
+ * Channel info WMI event
+ */
+struct wmi_chan_info_event {
+       __le32 err_code;
+       __le32 freq;
+       __le32 cmd_flags;
+       __le32 noise_floor;
+       __le32 rx_clear_count;
+       __le32 cycle_count;
+} __packed;
+
+/* Beacon filter wmi command info */
+#define BCN_FLT_MAX_SUPPORTED_IES      256
+#define BCN_FLT_MAX_ELEMS_IE_LIST      (BCN_FLT_MAX_SUPPORTED_IES / 32)
+
+struct bss_bcn_stats {
+       __le32 vdev_id;
+       __le32 bss_bcnsdropped;
+       __le32 bss_bcnsdelivered;
+} __packed;
+
+struct bcn_filter_stats {
+       __le32 bcns_dropped;
+       __le32 bcns_delivered;
+       __le32 activefilters;
+       struct bss_bcn_stats bss_stats;
+} __packed;
+
+struct wmi_add_bcn_filter_cmd {
+       u32 vdev_id;
+       u32 ie_map[BCN_FLT_MAX_ELEMS_IE_LIST];
+} __packed;
+
+enum wmi_sta_keepalive_method {
+       WMI_STA_KEEPALIVE_METHOD_NULL_FRAME = 1,
+       WMI_STA_KEEPALIVE_METHOD_UNSOLICITATED_ARP_RESPONSE = 2,
+};
+
+/* note: ip4 addresses are in network byte order, i.e. big endian */
+struct wmi_sta_keepalive_arp_resp {
+       __be32 src_ip4_addr;
+       __be32 dest_ip4_addr;
+       struct wmi_mac_addr dest_mac_addr;
+} __packed;
+
+struct wmi_sta_keepalive_cmd {
+       __le32 vdev_id;
+       __le32 enabled;
+       __le32 method; /* WMI_STA_KEEPALIVE_METHOD_ */
+       __le32 interval; /* in seconds */
+       struct wmi_sta_keepalive_arp_resp arp_resp;
+} __packed;
+
+#define ATH10K_RTS_MAX         2347
+#define ATH10K_FRAGMT_THRESHOLD_MIN    540
+#define ATH10K_FRAGMT_THRESHOLD_MAX    2346
+
+#define WMI_MAX_EVENT 0x1000
+/* Maximum number of pending TXed WMI packets */
+#define WMI_MAX_PENDING_TX_COUNT 128
+#define WMI_SKB_HEADROOM sizeof(struct wmi_cmd_hdr)
+
+/* By default disable power save for IBSS */
+#define ATH10K_DEFAULT_ATIM 0
+
+struct ath10k;
+struct ath10k_vif;
+
+int ath10k_wmi_attach(struct ath10k *ar);
+void ath10k_wmi_detach(struct ath10k *ar);
+int ath10k_wmi_wait_for_service_ready(struct ath10k *ar);
+int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar);
+void ath10k_wmi_flush_tx(struct ath10k *ar);
+
+int ath10k_wmi_connect_htc_service(struct ath10k *ar);
+int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
+                               const struct wmi_channel_arg *);
+int ath10k_wmi_pdev_suspend_target(struct ath10k *ar);
+int ath10k_wmi_pdev_resume_target(struct ath10k *ar);
+int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
+                                 u16 rd5g, u16 ctl2g, u16 ctl5g);
+int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id,
+                             u32 value);
+int ath10k_wmi_cmd_init(struct ath10k *ar);
+int ath10k_wmi_start_scan(struct ath10k *ar, const struct wmi_start_scan_arg *);
+void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *);
+int ath10k_wmi_stop_scan(struct ath10k *ar,
+                        const struct wmi_stop_scan_arg *arg);
+int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
+                          enum wmi_vdev_type type,
+                          enum wmi_vdev_subtype subtype,
+                          const u8 macaddr[ETH_ALEN]);
+int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id);
+int ath10k_wmi_vdev_start(struct ath10k *ar,
+                         const struct wmi_vdev_start_request_arg *);
+int ath10k_wmi_vdev_restart(struct ath10k *ar,
+                           const struct wmi_vdev_start_request_arg *);
+int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id);
+int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
+                      const u8 *bssid);
+int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id);
+int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
+                             enum wmi_vdev_param param_id, u32 param_value);
+int ath10k_wmi_vdev_install_key(struct ath10k *ar,
+                               const struct wmi_vdev_install_key_arg *arg);
+int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
+                   const u8 peer_addr[ETH_ALEN]);
+int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
+                   const u8 peer_addr[ETH_ALEN]);
+int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
+                  const u8 peer_addr[ETH_ALEN], u32 tid_bitmap);
+int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
+                             const u8 *peer_addr,
+                             enum wmi_peer_param param_id, u32 param_value);
+int ath10k_wmi_peer_assoc(struct ath10k *ar,
+                         const struct wmi_peer_assoc_complete_arg *arg);
+int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
+                         enum wmi_sta_ps_mode psmode);
+int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
+                               enum wmi_sta_powersave_param param_id,
+                               u32 value);
+int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+                              enum wmi_ap_ps_peer_param param_id, u32 value);
+int ath10k_wmi_scan_chan_list(struct ath10k *ar,
+                             const struct wmi_scan_chan_list_arg *arg);
+int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg);
+int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
+                       const struct wmi_pdev_set_wmm_params_arg *arg);
+int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id);
+
+#endif /* _WMI_H_ */
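
The structures above are raw wire format: every multi-byte field is an explicit __le32/__be32 and each struct is __packed, so callers byte-swap when filling them. Below is a minimal sketch, not taken from this patch, of how a host-side helper might build a wmi_sta_keepalive_cmd; only the struct layout and the WMI_STA_KEEPALIVE_METHOD_* constant come from the header above, while ath10k_wmi_alloc_skb(), ath10k_wmi_cmd_send() and WMI_STA_KEEPALIVE_CMDID are assumed placeholder names for the driver's real send path.

/* Hedged sketch: build a STA keepalive command for one vdev.  The
 * allocation/send helpers and the command id are assumptions, not
 * part of the header shown above. */
static int wmi_sta_keepalive_sketch(struct ath10k *ar, u32 vdev_id,
				    u32 interval_secs,
				    __be32 src_ip, __be32 dst_ip,
				    const u8 dst_mac[ETH_ALEN])
{
	struct wmi_sta_keepalive_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(sizeof(*cmd));	/* assumed helper */
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_sta_keepalive_cmd *)skb->data;
	cmd->vdev_id  = __cpu_to_le32(vdev_id);
	cmd->enabled  = __cpu_to_le32(1);
	cmd->method   = __cpu_to_le32(WMI_STA_KEEPALIVE_METHOD_UNSOLICITATED_ARP_RESPONSE);
	cmd->interval = __cpu_to_le32(interval_secs);

	/* IPv4 addresses stay in network byte order, per the comment above. */
	cmd->arp_resp.src_ip4_addr  = src_ip;
	cmd->arp_resp.dest_ip4_addr = dst_ip;
	memcpy(&cmd->arp_resp.dest_mac_addr, dst_mac, ETH_ALEN);

	return ath10k_wmi_cmd_send(ar, skb, WMI_STA_KEEPALIVE_CMDID);	/* assumed */
}
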
index 7f702fe3ecc2c68f5a3300cfa7182d00ad84fe38..ce67ab791eae9d008ed4c3d318b8c8949c30f428 100644 (file)
@@ -60,6 +60,7 @@
 
 #include <asm/unaligned.h>
 
+#include <net/mac80211.h>
 #include "base.h"
 #include "reg.h"
 #include "debug.h"
@@ -666,9 +667,46 @@ static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
        return htype;
 }
 
+static struct ieee80211_rate *
+ath5k_get_rate(const struct ieee80211_hw *hw,
+              const struct ieee80211_tx_info *info,
+              struct ath5k_buf *bf, int idx)
+{
+       /*
+        * Convert an ieee80211_tx_rate RC-table entry to
+        * the respective ieee80211_rate struct.
+        */
+       if (bf->rates[idx].idx < 0)
+               return NULL;
+
+       return &hw->wiphy->bands[info->band]->bitrates[bf->rates[idx].idx];
+}
+
+static u16
+ath5k_get_rate_hw_value(const struct ieee80211_hw *hw,
+                       const struct ieee80211_tx_info *info,
+                       struct ath5k_buf *bf, int idx)
+{
+       struct ieee80211_rate *rate;
+       u16 hw_rate;
+       u8 rc_flags;
+
+       rate = ath5k_get_rate(hw, info, bf, idx);
+       if (!rate)
+               return 0;
+
+       rc_flags = bf->rates[idx].flags;
+       hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ?
+                  rate->hw_value_short : rate->hw_value;
+
+       return hw_rate;
+}
+
 static int
 ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
-                 struct ath5k_txq *txq, int padsize)
+                 struct ath5k_txq *txq, int padsize,
+                 struct ieee80211_tx_control *control)
 {
        struct ath5k_desc *ds = bf->desc;
        struct sk_buff *skb = bf->skb;
@@ -688,7 +726,11 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
        bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
                        DMA_TO_DEVICE);
 
-       rate = ieee80211_get_tx_rate(ah->hw, info);
+       ieee80211_get_tx_rates(info->control.vif,
+                              control ? control->sta : NULL,
+                              skb, bf->rates, ARRAY_SIZE(bf->rates));
+
+       rate = ath5k_get_rate(ah->hw, info, bf, 0);
+
        if (!rate) {
                ret = -EINVAL;
                goto err_unmap;
@@ -698,8 +740,8 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
                flags |= AR5K_TXDESC_NOACK;
 
        rc_flags = info->control.rates[0].flags;
-       hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ?
-               rate->hw_value_short : rate->hw_value;
+
+       hw_rate = ath5k_get_rate_hw_value(ah->hw, info, bf, 0);
 
        pktlen = skb->len;
 
@@ -722,12 +764,13 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
                duration = le16_to_cpu(ieee80211_ctstoself_duration(ah->hw,
                        info->control.vif, pktlen, info));
        }
+
        ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
                ieee80211_get_hdrlen_from_skb(skb), padsize,
                get_hw_packet_type(skb),
                (ah->ah_txpower.txp_requested * 2),
                hw_rate,
-               info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags,
+               bf->rates[0].count, keyidx, ah->ah_tx_ant, flags,
                cts_rate, duration);
        if (ret)
                goto err_unmap;
@@ -736,13 +779,15 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
        if (ah->ah_capabilities.cap_has_mrr_support) {
                memset(mrr_rate, 0, sizeof(mrr_rate));
                memset(mrr_tries, 0, sizeof(mrr_tries));
+
                for (i = 0; i < 3; i++) {
-                       rate = ieee80211_get_alt_retry_rate(ah->hw, info, i);
+
+                       rate = ath5k_get_rate(ah->hw, info, bf, i);
                        if (!rate)
                                break;
 
-                       mrr_rate[i] = rate->hw_value;
-                       mrr_tries[i] = info->control.rates[i + 1].count;
+                       mrr_rate[i] = ath5k_get_rate_hw_value(ah->hw, info, bf, i);
+                       mrr_tries[i] = bf->rates[i].count;
                }
 
                ath5k_hw_setup_mrr_tx_desc(ah, ds,
@@ -1515,7 +1560,7 @@ unlock:
 
 void
 ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
-              struct ath5k_txq *txq)
+              struct ath5k_txq *txq, struct ieee80211_tx_control *control)
 {
        struct ath5k_hw *ah = hw->priv;
        struct ath5k_buf *bf;
@@ -1555,7 +1600,7 @@ ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
 
        bf->skb = skb;
 
-       if (ath5k_txbuf_setup(ah, bf, txq, padsize)) {
+       if (ath5k_txbuf_setup(ah, bf, txq, padsize, control)) {
                bf->skb = NULL;
                spin_lock_irqsave(&ah->txbuflock, flags);
                list_add_tail(&bf->list, &ah->txbuf);
@@ -1571,11 +1616,13 @@ drop_packet:
 
 static void
 ath5k_tx_frame_completed(struct ath5k_hw *ah, struct sk_buff *skb,
-                        struct ath5k_txq *txq, struct ath5k_tx_status *ts)
+                        struct ath5k_txq *txq, struct ath5k_tx_status *ts,
+                        struct ath5k_buf *bf)
 {
        struct ieee80211_tx_info *info;
        u8 tries[3];
        int i;
+       int size = 0;
 
        ah->stats.tx_all_count++;
        ah->stats.tx_bytes_count += skb->len;
@@ -1587,6 +1634,9 @@ ath5k_tx_frame_completed(struct ath5k_hw *ah, struct sk_buff *skb,
 
        ieee80211_tx_info_clear_status(info);
 
+       size = min_t(int, sizeof(info->status.rates), sizeof(bf->rates));
+       memcpy(info->status.rates, bf->rates, size);
+
        for (i = 0; i < ts->ts_final_idx; i++) {
                struct ieee80211_tx_rate *r =
                        &info->status.rates[i];
@@ -1663,7 +1713,7 @@ ath5k_tx_processq(struct ath5k_hw *ah, struct ath5k_txq *txq)
 
                        dma_unmap_single(ah->dev, bf->skbaddr, skb->len,
                                        DMA_TO_DEVICE);
-                       ath5k_tx_frame_completed(ah, skb, txq, &ts);
+                       ath5k_tx_frame_completed(ah, skb, txq, &ts, bf);
                }
 
                /*
@@ -1917,7 +1967,7 @@ ath5k_beacon_send(struct ath5k_hw *ah)
 
        skb = ieee80211_get_buffered_bc(ah->hw, vif);
        while (skb) {
-               ath5k_tx_queue(ah->hw, skb, ah->cabq);
+               ath5k_tx_queue(ah->hw, skb, ah->cabq, NULL);
 
                if (ah->cabq->txq_len >= ah->cabq->txq_max)
                        break;
@@ -2442,7 +2492,8 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
                        IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
                        IEEE80211_HW_SIGNAL_DBM |
                        IEEE80211_HW_MFP_CAPABLE |
-                       IEEE80211_HW_REPORTS_TX_ACK_STATUS;
+                       IEEE80211_HW_REPORTS_TX_ACK_STATUS |
+                       IEEE80211_HW_SUPPORTS_RC_TABLE;
 
        hw->wiphy->interface_modes =
                BIT(NL80211_IFTYPE_AP) |
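
Taken together, the ath5k hunks above switch the driver to mac80211's rate-table API: it advertises IEEE80211_HW_SUPPORTS_RC_TABLE, lets ieee80211_get_tx_rates() resolve the retry series into the new per-buffer rates[] array at setup time, and copies that array into info->status.rates on completion instead of reading info->control.rates. A compressed sketch of the same flow, with the driver plumbing elided:

#include <net/mac80211.h>

/* Sketch of the rate-table flow adopted above; descriptor programming
 * and completion handling are reduced to comments. */
static void rc_table_flow_sketch(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate rates[4];	/* like ath5k_buf::rates */
	struct ieee80211_rate *rate;
	u16 hw_rates[4];
	int i;

	/* 1) TX setup: have mac80211 fill the multi-rate retry series. */
	ieee80211_get_tx_rates(vif, sta, skb, rates, ARRAY_SIZE(rates));

	/* 2) Map each entry to a hardware rate code. */
	for (i = 0; i < ARRAY_SIZE(rates); i++) {
		if (rates[i].idx < 0)
			break;
		rate = &hw->wiphy->bands[info->band]->bitrates[rates[i].idx];
		hw_rates[i] = (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ?
			      rate->hw_value_short : rate->hw_value;
		/* hw_rates[i] and rates[i].count would go into the descriptor */
	}

	/* 3) TX completion: report the series that was actually used. */
	memcpy(info->status.rates, rates,
	       min_t(int, sizeof(info->status.rates), sizeof(rates)));
}
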
index 6c94c7ff23500abaf5aceed394e0ddb7c381468a..ca9a83ceeee1d430364a690c5b10703b14fc27d9 100644 (file)
@@ -47,6 +47,7 @@ struct ath5k_hw;
 struct ath5k_txq;
 struct ieee80211_channel;
 struct ath_bus_ops;
+struct ieee80211_tx_control;
 enum nl80211_iftype;
 
 enum ath5k_srev_type {
@@ -61,11 +62,12 @@ struct ath5k_srev_name {
 };
 
 struct ath5k_buf {
-       struct list_head        list;
-       struct ath5k_desc       *desc;  /* virtual addr of desc */
-       dma_addr_t              daddr;  /* physical addr of desc */
-       struct sk_buff          *skb;   /* skbuff for buf */
-       dma_addr_t              skbaddr;/* physical addr of skb data */
+       struct list_head                list;
+       struct ath5k_desc               *desc;          /* virtual addr of desc */
+       dma_addr_t                      daddr;          /* physical addr of desc */
+       struct sk_buff                  *skb;           /* skbuff for buf */
+       dma_addr_t                      skbaddr;        /* physical addr of skb data */
+       struct ieee80211_tx_rate        rates[4];       /* rates for the multi-rate retry stages */
 };
 
 struct ath5k_vif {
@@ -103,7 +105,7 @@ int ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan);
 void ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf);
 void ath5k_rxbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf);
 void ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
-                   struct ath5k_txq *txq);
+                   struct ath5k_txq *txq, struct ieee80211_tx_control *control);
 
 const char *ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val);
 
index 06f86f435711e805c958ee5f98354a60aafe5f41..81b686c6a3764111e8c8e969a90d36d6ced054ea 100644 (file)
@@ -66,7 +66,7 @@ ath5k_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
                return;
        }
 
-       ath5k_tx_queue(hw, skb, &ah->txqs[qnum]);
+       ath5k_tx_queue(hw, skb, &ah->txqs[qnum], control);
 }
 
 
index 5c9736a94e544d0c9008480ac994c593d1b4d269..2437ad26949d1c43617204fc7c887bf196a1e2f5 100644 (file)
@@ -3175,10 +3175,21 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
 {
        struct ath6kl_vif *vif = ath6kl_vif_from_wdev(wdev);
        struct ath6kl *ar = ath6kl_priv(vif->ndev);
-       u32 id;
+       u32 id, freq;
        const struct ieee80211_mgmt *mgmt;
        bool more_data, queued;
 
+       /* Default to the current channel, but use the one specified
+        * as an argument, if any.
+        */
+       freq = vif->ch_hint;
+       if (chan)
+               freq = chan->center_freq;
+
+       /* never send freq zero to the firmware */
+       if (WARN_ON(freq == 0))
+               return -EINVAL;
+
        mgmt = (const struct ieee80211_mgmt *) buf;
        if (vif->nw_type == AP_NETWORK && test_bit(CONNECTED, &vif->flags) &&
            ieee80211_is_probe_resp(mgmt->frame_control) &&
@@ -3188,8 +3199,7 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
                 * command to allow the target to fill in the generic IEs.
                 */
                *cookie = 0; /* TX status not supported */
-               return ath6kl_send_go_probe_resp(vif, buf, len,
-                                                chan->center_freq);
+               return ath6kl_send_go_probe_resp(vif, buf, len, freq);
        }
 
        id = vif->send_action_id++;
@@ -3205,17 +3215,14 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
 
        /* AP mode Power saving processing */
        if (vif->nw_type == AP_NETWORK) {
-               queued = ath6kl_mgmt_powersave_ap(vif,
-                                       id, chan->center_freq,
-                                       wait, buf,
-                                       len, &more_data, no_cck);
+               queued = ath6kl_mgmt_powersave_ap(vif, id, freq, wait, buf, len,
+                                                 &more_data, no_cck);
                if (queued)
                        return 0;
        }
 
-       return ath6kl_wmi_send_mgmt_cmd(ar->wmi, vif->fw_vif_idx, id,
-                                       chan->center_freq, wait,
-                                       buf, len, no_cck);
+       return ath6kl_wmi_send_mgmt_cmd(ar->wmi, vif->fw_vif_idx, id, freq,
+                                       wait, buf, len, no_cck);
 }
 
 static void ath6kl_mgmt_frame_register(struct wiphy *wiphy,
@@ -3679,6 +3686,20 @@ err:
        return NULL;
 }
 
+#ifdef CONFIG_PM
+static const struct wiphy_wowlan_support ath6kl_wowlan_support = {
+       .flags = WIPHY_WOWLAN_MAGIC_PKT |
+                WIPHY_WOWLAN_DISCONNECT |
+                WIPHY_WOWLAN_GTK_REKEY_FAILURE  |
+                WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+                WIPHY_WOWLAN_EAP_IDENTITY_REQ   |
+                WIPHY_WOWLAN_4WAY_HANDSHAKE,
+       .n_patterns = WOW_MAX_FILTERS_PER_LIST,
+       .pattern_min_len = 1,
+       .pattern_max_len = WOW_PATTERN_SIZE,
+};
+#endif
+
 int ath6kl_cfg80211_init(struct ath6kl *ar)
 {
        struct wiphy *wiphy = ar->wiphy;
@@ -3772,15 +3793,7 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
        wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
 
 #ifdef CONFIG_PM
-       wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
-                             WIPHY_WOWLAN_DISCONNECT |
-                             WIPHY_WOWLAN_GTK_REKEY_FAILURE  |
-                             WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
-                             WIPHY_WOWLAN_EAP_IDENTITY_REQ   |
-                             WIPHY_WOWLAN_4WAY_HANDSHAKE;
-       wiphy->wowlan.n_patterns = WOW_MAX_FILTERS_PER_LIST;
-       wiphy->wowlan.pattern_min_len = 1;
-       wiphy->wowlan.pattern_max_len = WOW_PATTERN_SIZE;
+       wiphy->wowlan = &ath6kl_wowlan_support;
 #endif
 
        wiphy->max_sched_scan_ssids = MAX_PROBED_SSIDS;
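
The WoWLAN hunk follows a cfg80211 interface change: wiphy->wowlan is now a pointer to const struct wiphy_wowlan_support rather than an embedded structure, so the capabilities become static, read-only data that the driver merely points at. A trimmed illustration of the new registration pattern (the pattern limits below are made-up stand-ins, not ath6kl's values):

#include <net/cfg80211.h>

#ifdef CONFIG_PM
/* Static, read-only description of what the hardware can wake on. */
static const struct wiphy_wowlan_support example_wowlan_support = {
	.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
	.n_patterns = 16,		/* placeholder limit */
	.pattern_min_len = 1,
	.pattern_max_len = 148,		/* placeholder limit */
};
#endif

static void example_register_wowlan(struct wiphy *wiphy)
{
#ifdef CONFIG_PM
	/* One pointer assignment replaces the per-field copies that the
	 * removed lines above used to do. */
	wiphy->wowlan = &example_wowlan_support;
#endif
}
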
index fe38b836cb2651a77726af4e3d7772b591d46427..dbfd17d0a5faa33fa390b150d8d39e03a42d3746 100644 (file)
@@ -1240,20 +1240,14 @@ static ssize_t ath6kl_force_roam_write(struct file *file,
        char buf[20];
        size_t len;
        u8 bssid[ETH_ALEN];
-       int i;
-       int addr[ETH_ALEN];
 
        len = min(count, sizeof(buf) - 1);
        if (copy_from_user(buf, user_buf, len))
                return -EFAULT;
        buf[len] = '\0';
 
-       if (sscanf(buf, "%02x:%02x:%02x:%02x:%02x:%02x",
-                  &addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5])
-           != ETH_ALEN)
+       if (!mac_pton(buf, bssid))
                return -EINVAL;
-       for (i = 0; i < ETH_ALEN; i++)
-               bssid[i] = addr[i];
 
        ret = ath6kl_wmi_force_roam_cmd(ar->wmi, bssid);
        if (ret)
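
The debugfs change above replaces a hand-rolled sscanf("%02x:...") parse with mac_pton(), the kernel's standard MAC-address parser; the user buffer is still NUL-terminated first, which mac_pton() relies on. A minimal usage sketch:

#include <linux/kernel.h>	/* mac_pton() */
#include <linux/errno.h>
#include <linux/if_ether.h>	/* ETH_ALEN */

/* Parse "aa:bb:cc:dd:ee:ff" into a binary MAC address. */
static int parse_mac_sketch(const char *buf, u8 mac[ETH_ALEN])
{
	/* mac_pton() succeeds only for a well-formed, NUL-terminated
	 * "xx:xx:xx:xx:xx:xx" string and does not touch mac[] on failure. */
	if (!mac_pton(buf, mac))
		return -EINVAL;

	return 0;
}
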
index 40ffee6184fd3b680037da63e0d35abcf44b4b6e..6a67881f94d60b2ff9880c0e8280cfbf4ca7edb1 100644 (file)
@@ -1696,10 +1696,16 @@ static int __ath6kl_init_hw_start(struct ath6kl *ar)
                                                    test_bit(WMI_READY,
                                                             &ar->flag),
                                                    WMI_TIMEOUT);
+       if (timeleft <= 0) {
+               clear_bit(WMI_READY, &ar->flag);
+               ath6kl_err("wmi is not ready or wait was interrupted: %ld\n",
+                          timeleft);
+               ret = -EIO;
+               goto err_htc_stop;
+       }
 
        ath6kl_dbg(ATH6KL_DBG_BOOT, "firmware booted\n");
 
-
        if (test_and_clear_bit(FIRST_BOOT, &ar->flag)) {
                ath6kl_info("%s %s fw %s api %d%s\n",
                            ar->hw.name,
@@ -1718,12 +1724,6 @@ static int __ath6kl_init_hw_start(struct ath6kl *ar)
                goto err_htc_stop;
        }
 
-       if (!timeleft || signal_pending(current)) {
-               ath6kl_err("wmi is not ready or wait was interrupted\n");
-               ret = -EIO;
-               goto err_htc_stop;
-       }
-
        ath6kl_dbg(ATH6KL_DBG_TRC, "%s: wmi is ready\n", __func__);
 
        /* communicate the wmi protocol verision to the target */
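
The init.c hunk above also changes how the wait result is interpreted: instead of re-testing the WMI_READY flag and signal_pending() further down, the code now acts directly on the return value of wait_event_interruptible_timeout(), which is positive (remaining jiffies) on success, 0 on timeout and negative if a signal interrupted the wait, so timeleft <= 0 covers both failure cases. A generic sketch of that pattern, independent of ath6kl:

#include <linux/types.h>
#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

/* Wait for *ready to become true on wq, bounded by timeout_ms. */
static int wait_ready_sketch(wait_queue_head_t *wq, bool *ready,
			     unsigned int timeout_ms)
{
	long timeleft;

	timeleft = wait_event_interruptible_timeout(*wq, *ready,
						    msecs_to_jiffies(timeout_ms));
	if (timeleft == 0)		/* timed out */
		return -ETIMEDOUT;
	if (timeleft < 0)		/* interrupted by a signal */
		return timeleft;	/* -ERESTARTSYS */

	return 0;			/* condition became true in time */
}
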
index fb141454c6d2f3c9de1d82135aeace81b69276fd..7126bdd4236c2b1e78dd3a9d2c600dbadbb53f0e 100644 (file)
@@ -345,17 +345,17 @@ static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
 {
        struct hif_scatter_req *s_req;
        struct bus_request *bus_req;
-       int i, scat_req_sz, scat_list_sz, sg_sz, buf_sz;
+       int i, scat_req_sz, scat_list_sz, size;
        u8 *virt_buf;
 
        scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item);
        scat_req_sz = sizeof(*s_req) + scat_list_sz;
 
        if (!virt_scat)
-               sg_sz = sizeof(struct scatterlist) * n_scat_entry;
+               size = sizeof(struct scatterlist) * n_scat_entry;
        else
-               buf_sz =  2 * L1_CACHE_BYTES +
-                         ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
+               size =  2 * L1_CACHE_BYTES +
+                       ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
 
        for (i = 0; i < n_scat_req; i++) {
                /* allocate the scatter request */
@@ -364,7 +364,7 @@ static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
                        return -ENOMEM;
 
                if (virt_scat) {
-                       virt_buf = kzalloc(buf_sz, GFP_KERNEL);
+                       virt_buf = kzalloc(size, GFP_KERNEL);
                        if (!virt_buf) {
                                kfree(s_req);
                                return -ENOMEM;
@@ -374,7 +374,7 @@ static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
                                (u8 *)L1_CACHE_ALIGN((unsigned long)virt_buf);
                } else {
                        /* allocate sglist */
-                       s_req->sgentries = kzalloc(sg_sz, GFP_KERNEL);
+                       s_req->sgentries = kzalloc(size, GFP_KERNEL);
 
                        if (!s_req->sgentries) {
                                kfree(s_req);
index bed0d337712d3a6427fe5cee0a66e585fbd92fa9..f38ff6a6255e7be58c4687a2ff85115c8250225c 100644 (file)
@@ -1061,6 +1061,22 @@ static void ath6kl_usb_cleanup_scatter(struct ath6kl *ar)
        return;
 }
 
+static int ath6kl_usb_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
+{
+       /*
+        * cfg80211 suspend/WOW currently not supported for USB.
+        */
+       return 0;
+}
+
+static int ath6kl_usb_resume(struct ath6kl *ar)
+{
+       /*
+        * cfg80211 resume currently not supported for USB.
+        */
+       return 0;
+}
+
 static const struct ath6kl_hif_ops ath6kl_usb_ops = {
        .diag_read32 = ath6kl_usb_diag_read32,
        .diag_write32 = ath6kl_usb_diag_write32,
@@ -1074,6 +1090,8 @@ static const struct ath6kl_hif_ops ath6kl_usb_ops = {
        .pipe_map_service = ath6kl_usb_map_service_pipe,
        .pipe_get_free_queue_number = ath6kl_usb_get_free_queue_number,
        .cleanup_scatter = ath6kl_usb_cleanup_scatter,
+       .suspend = ath6kl_usb_suspend,
+       .resume = ath6kl_usb_resume,
 };
 
 /* ath6kl usb driver registered functions */
@@ -1152,7 +1170,7 @@ static void ath6kl_usb_remove(struct usb_interface *interface)
 
 #ifdef CONFIG_PM
 
-static int ath6kl_usb_suspend(struct usb_interface *interface,
+static int ath6kl_usb_pm_suspend(struct usb_interface *interface,
                              pm_message_t message)
 {
        struct ath6kl_usb *device;
@@ -1162,7 +1180,7 @@ static int ath6kl_usb_suspend(struct usb_interface *interface,
        return 0;
 }
 
-static int ath6kl_usb_resume(struct usb_interface *interface)
+static int ath6kl_usb_pm_resume(struct usb_interface *interface)
 {
        struct ath6kl_usb *device;
        device = usb_get_intfdata(interface);
@@ -1175,7 +1193,7 @@ static int ath6kl_usb_resume(struct usb_interface *interface)
        return 0;
 }
 
-static int ath6kl_usb_reset_resume(struct usb_interface *intf)
+static int ath6kl_usb_pm_reset_resume(struct usb_interface *intf)
 {
        if (usb_get_intfdata(intf))
                ath6kl_usb_remove(intf);
@@ -1184,9 +1202,9 @@ static int ath6kl_usb_reset_resume(struct usb_interface *intf)
 
 #else
 
-#define ath6kl_usb_suspend NULL
-#define ath6kl_usb_resume NULL
-#define ath6kl_usb_reset_resume NULL
+#define ath6kl_usb_pm_suspend NULL
+#define ath6kl_usb_pm_resume NULL
+#define ath6kl_usb_pm_reset_resume NULL
 
 #endif
 
@@ -1201,9 +1219,9 @@ MODULE_DEVICE_TABLE(usb, ath6kl_usb_ids);
 static struct usb_driver ath6kl_usb_driver = {
        .name = "ath6kl_usb",
        .probe = ath6kl_usb_probe,
-       .suspend = ath6kl_usb_suspend,
-       .resume = ath6kl_usb_resume,
-       .reset_resume = ath6kl_usb_reset_resume,
+       .suspend = ath6kl_usb_pm_suspend,
+       .resume = ath6kl_usb_pm_resume,
+       .reset_resume = ath6kl_usb_pm_reset_resume,
        .disconnect = ath6kl_usb_remove,
        .id_table = ath6kl_usb_ids,
        .supports_autosuspend = true,
index 3c2cbc9d6295df0e16afce4a4d541bde93f9164a..760ab3fe09e2f753c360e1b1a149700ef5beca04 100644 (file)
@@ -84,14 +84,6 @@ config ATH9K_DFS_CERTIFIED
          developed. At this point enabling this option won't do anything
          except increase code size.
 
-config ATH9K_MAC_DEBUG
-       bool "Atheros MAC statistics"
-       depends on ATH9K_DEBUGFS
-       default y
-       ---help---
-         This option enables collection of statistics for Rx/Tx status
-         data and some other MAC related statistics
-
 config ATH9K_LEGACY_RATE_CONTROL
        bool "Atheros ath9k rate control"
        depends on ATH9K
index 7ecd40f07a7442d4769c6e0e60331f9824f3d217..4994bea809eb00db15a8899ee5d2dd966002db4a 100644 (file)
@@ -46,8 +46,8 @@ static const struct ani_ofdm_level_entry ofdm_level_table[] = {
        {  5,  4,  1  }, /* lvl 5 */
        {  6,  5,  1  }, /* lvl 6 */
        {  7,  6,  1  }, /* lvl 7 */
-       {  7,  6,  0  }, /* lvl 8 */
-       {  7,  7,  0  }  /* lvl 9 */
+       {  7,  7,  1  }, /* lvl 8 */
+       {  7,  8,  0  }  /* lvl 9 */
 };
 #define ATH9K_ANI_OFDM_NUM_LEVEL \
        ARRAY_SIZE(ofdm_level_table)
@@ -91,8 +91,8 @@ static const struct ani_cck_level_entry cck_level_table[] = {
        {  4,  0  }, /* lvl 4 */
        {  5,  0  }, /* lvl 5 */
        {  6,  0  }, /* lvl 6 */
-       {  6,  0  }, /* lvl 7 (only for high rssi) */
-       {  7,  0  }  /* lvl 8 (only for high rssi) */
+       {  7,  0  }, /* lvl 7 (only for high rssi) */
+       {  8,  0  }  /* lvl 8 (only for high rssi) */
 };
 
 #define ATH9K_ANI_CCK_NUM_LEVEL \
@@ -118,10 +118,10 @@ static void ath9k_ani_restart(struct ath_hw *ah)
 {
        struct ar5416AniState *aniState;
 
-       if (!DO_ANI(ah))
+       if (!ah->curchan)
                return;
 
-       aniState = &ah->curchan->ani;
+       aniState = &ah->ani;
        aniState->listenTime = 0;
 
        ENABLE_REGWRITE_BUFFER(ah);
@@ -143,7 +143,7 @@ static void ath9k_ani_restart(struct ath_hw *ah)
 static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel,
                                  bool scan)
 {
-       struct ar5416AniState *aniState = &ah->curchan->ani;
+       struct ar5416AniState *aniState = &ah->ani;
        struct ath_common *common = ath9k_hw_common(ah);
        const struct ani_ofdm_level_entry *entry_ofdm;
        const struct ani_cck_level_entry *entry_cck;
@@ -177,10 +177,15 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel,
            BEACON_RSSI(ah) <= ATH9K_ANI_RSSI_THR_HIGH)
                weak_sig = true;
 
-       if (aniState->ofdmWeakSigDetect != weak_sig)
-                       ath9k_hw_ani_control(ah,
-                               ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
-                               entry_ofdm->ofdm_weak_signal_on);
+       /*
+        * OFDM Weak signal detection is always enabled for AP mode.
+        */
+       if (ah->opmode != NL80211_IFTYPE_AP &&
+           aniState->ofdmWeakSigDetect != weak_sig) {
+               ath9k_hw_ani_control(ah,
+                                    ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
+                                    entry_ofdm->ofdm_weak_signal_on);
+       }
 
        if (aniState->ofdmNoiseImmunityLevel >= ATH9K_ANI_OFDM_DEF_LEVEL) {
                ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH;
@@ -195,10 +200,10 @@ static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah)
 {
        struct ar5416AniState *aniState;
 
-       if (!DO_ANI(ah))
+       if (!ah->curchan)
                return;
 
-       aniState = &ah->curchan->ani;
+       aniState = &ah->ani;
 
        if (aniState->ofdmNoiseImmunityLevel < ATH9K_ANI_OFDM_MAX_LEVEL)
                ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel + 1, false);
@@ -210,7 +215,7 @@ static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah)
 static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel,
                                 bool scan)
 {
-       struct ar5416AniState *aniState = &ah->curchan->ani;
+       struct ar5416AniState *aniState = &ah->ani;
        struct ath_common *common = ath9k_hw_common(ah);
        const struct ani_ofdm_level_entry *entry_ofdm;
        const struct ani_cck_level_entry *entry_cck;
@@ -251,10 +256,10 @@ static void ath9k_hw_ani_cck_err_trigger(struct ath_hw *ah)
 {
        struct ar5416AniState *aniState;
 
-       if (!DO_ANI(ah))
+       if (!ah->curchan)
                return;
 
-       aniState = &ah->curchan->ani;
+       aniState = &ah->ani;
 
        if (aniState->cckNoiseImmunityLevel < ATH9K_ANI_CCK_MAX_LEVEL)
                ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel + 1,
@@ -269,7 +274,7 @@ static void ath9k_hw_ani_lower_immunity(struct ath_hw *ah)
 {
        struct ar5416AniState *aniState;
 
-       aniState = &ah->curchan->ani;
+       aniState = &ah->ani;
 
        /* lower OFDM noise immunity */
        if (aniState->ofdmNoiseImmunityLevel > 0 &&
@@ -292,12 +297,12 @@ static void ath9k_hw_ani_lower_immunity(struct ath_hw *ah)
  */
 void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
 {
-       struct ar5416AniState *aniState = &ah->curchan->ani;
+       struct ar5416AniState *aniState = &ah->ani;
        struct ath9k_channel *chan = ah->curchan;
        struct ath_common *common = ath9k_hw_common(ah);
        int ofdm_nil, cck_nil;
 
-       if (!DO_ANI(ah))
+       if (!ah->curchan)
                return;
 
        BUG_ON(aniState == NULL);
@@ -363,24 +368,13 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
        ath9k_hw_set_ofdm_nil(ah, ofdm_nil, is_scanning);
        ath9k_hw_set_cck_nil(ah, cck_nil, is_scanning);
 
-       /*
-        * enable phy counters if hw supports or if not, enable phy
-        * interrupts (so we can count each one)
-        */
        ath9k_ani_restart(ah);
-
-       ENABLE_REGWRITE_BUFFER(ah);
-
-       REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
-       REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
-
-       REGWRITE_BUFFER_FLUSH(ah);
 }
 
 static bool ath9k_hw_ani_read_counters(struct ath_hw *ah)
 {
        struct ath_common *common = ath9k_hw_common(ah);
-       struct ar5416AniState *aniState = &ah->curchan->ani;
+       struct ar5416AniState *aniState = &ah->ani;
        u32 phyCnt1, phyCnt2;
        int32_t listenTime;
 
@@ -415,10 +409,10 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan)
        struct ath_common *common = ath9k_hw_common(ah);
        u32 ofdmPhyErrRate, cckPhyErrRate;
 
-       if (!DO_ANI(ah))
+       if (!ah->curchan)
                return;
 
-       aniState = &ah->curchan->ani;
+       aniState = &ah->ani;
        if (!ath9k_hw_ani_read_counters(ah))
                return;
 
@@ -490,32 +484,22 @@ EXPORT_SYMBOL(ath9k_hw_disable_mib_counters);
 void ath9k_hw_ani_init(struct ath_hw *ah)
 {
        struct ath_common *common = ath9k_hw_common(ah);
-       int i;
+       struct ar5416AniState *ani = &ah->ani;
 
        ath_dbg(common, ANI, "Initialize ANI\n");
 
        ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH;
        ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW;
-
        ah->config.cck_trig_high = ATH9K_ANI_CCK_TRIG_HIGH;
        ah->config.cck_trig_low = ATH9K_ANI_CCK_TRIG_LOW;
 
-       for (i = 0; i < ARRAY_SIZE(ah->channels); i++) {
-               struct ath9k_channel *chan = &ah->channels[i];
-               struct ar5416AniState *ani = &chan->ani;
-
-               ani->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
-
-               ani->firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
-
-               ani->mrcCCK = AR_SREV_9300_20_OR_LATER(ah) ? true : false;
-
-               ani->ofdmsTurn = true;
-
-               ani->ofdmWeakSigDetect = ATH9K_ANI_USE_OFDM_WEAK_SIG;
-               ani->cckNoiseImmunityLevel = ATH9K_ANI_CCK_DEF_LEVEL;
-               ani->ofdmNoiseImmunityLevel = ATH9K_ANI_OFDM_DEF_LEVEL;
-       }
+       ani->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
+       ani->firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
+       ani->mrcCCK = AR_SREV_9300_20_OR_LATER(ah) ? true : false;
+       ani->ofdmsTurn = true;
+       ani->ofdmWeakSigDetect = true;
+       ani->cckNoiseImmunityLevel = ATH9K_ANI_CCK_DEF_LEVEL;
+       ani->ofdmNoiseImmunityLevel = ATH9K_ANI_OFDM_DEF_LEVEL;
 
        /*
         * since we expect some ongoing maintenance on the tables, let's sanity
@@ -524,9 +508,6 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
        ah->aniperiod = ATH9K_ANI_PERIOD;
        ah->config.ani_poll_interval = ATH9K_ANI_POLLINTERVAL;
 
-       if (ah->config.enable_ani)
-               ah->proc_phyerr |= HAL_PROCESS_ANI;
-
        ath9k_ani_restart(ah);
        ath9k_enable_mib_counters(ah);
 }
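
Two things happen across the ANI hunks above: the per-channel ar5416AniState moves into a single ah->ani instance (so scans and channel changes no longer each carry their own immunity levels), and the DO_ANI() gate is reduced to a plain ah->curchan check, with the HAL_PROCESS_ANI bookkeeping removed. The trigger constants that remain in ani.h are still expressed in errors per second, so the monitor path (ath9k_hw_ani_monitor() above) compares counter-derived rates against them. A hedged, free-standing illustration of that comparison, not the driver's exact code:

#include <linux/types.h>

/* Illustration only: turn a PHY error count accumulated over
 * listen_ms milliseconds into errors/second and compare it against a
 * trigger threshold such as ATH9K_ANI_OFDM_TRIG_HIGH. */
static bool ani_over_threshold(u32 err_count, u32 listen_ms,
			       u32 trig_errs_per_sec)
{
	u32 errs_per_sec;

	if (!listen_ms)			/* nothing measured yet */
		return false;

	errs_per_sec = err_count * 1000 / listen_ms;

	return errs_per_sec >= trig_errs_per_sec;
}
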
index dddb1361039a71de9dbd205bba879e0dfcb8a14a..b54a3fb0188391eba46a797f923a15b40fe34b66 100644 (file)
 #ifndef ANI_H
 #define ANI_H
 
-#define HAL_PROCESS_ANI           0x00000001
-
-#define DO_ANI(ah) (((ah)->proc_phyerr & HAL_PROCESS_ANI) && ah->curchan)
-
 #define BEACON_RSSI(ahp) (ahp->stats.avgbrssi)
 
 /* units are errors per second */
-#define ATH9K_ANI_OFDM_TRIG_HIGH          3500
+#define ATH9K_ANI_OFDM_TRIG_HIGH           3500
 #define ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI 1000
 
-/* units are errors per second */
 #define ATH9K_ANI_OFDM_TRIG_LOW           400
 #define ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI 900
 
-/* units are errors per second */
 #define ATH9K_ANI_CCK_TRIG_HIGH           600
-
-/* units are errors per second */
 #define ATH9K_ANI_CCK_TRIG_LOW            300
 
-#define ATH9K_ANI_NOISE_IMMUNE_LVL        4
-#define ATH9K_ANI_USE_OFDM_WEAK_SIG       true
-#define ATH9K_ANI_CCK_WEAK_SIG_THR        false
-
 #define ATH9K_ANI_SPUR_IMMUNE_LVL         3
-
 #define ATH9K_ANI_FIRSTEP_LVL             2
 
 #define ATH9K_ANI_RSSI_THR_HIGH           40
 /* in ms */
 #define ATH9K_ANI_POLLINTERVAL            1000
 
-#define HAL_NOISE_IMMUNE_MAX              4
-#define HAL_SPUR_IMMUNE_MAX               7
-#define HAL_FIRST_STEP_MAX                2
-
 #define ATH9K_SIG_FIRSTEP_SETTING_MIN     0
 #define ATH9K_SIG_FIRSTEP_SETTING_MAX     20
 #define ATH9K_SIG_SPUR_IMM_SETTING_MIN    0
@@ -111,7 +94,7 @@ struct ar5416AniState {
        u8 mrcCCK;
        u8 spurImmunityLevel;
        u8 firstepLevel;
-       u8 ofdmWeakSigDetect;
+       bool ofdmWeakSigDetect;
        u32 listenTime;
        u32 ofdmPhyErrCount;
        u32 cckPhyErrCount;
@@ -119,8 +102,6 @@ struct ar5416AniState {
 };
 
 struct ar5416Stats {
-       u32 ast_ani_niup;
-       u32 ast_ani_nidown;
        u32 ast_ani_spurup;
        u32 ast_ani_spurdown;
        u32 ast_ani_ofdmon;
index 391da5ad6a99c54d8990790f26fc5c932def2193..d1acfe98918a2a50689c2760ed6455cf2c239b2a 100644 (file)
@@ -931,7 +931,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
 {
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_channel *chan = ah->curchan;
-       struct ar5416AniState *aniState = &chan->ani;
+       struct ar5416AniState *aniState = &ah->ani;
        s32 value, value2;
 
        switch (cmd & ah->ani_function) {
@@ -1207,7 +1207,7 @@ static void ar5008_hw_ani_cache_ini_regs(struct ath_hw *ah)
 {
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_channel *chan = ah->curchan;
-       struct ar5416AniState *aniState = &chan->ani;
+       struct ar5416AniState *aniState = &ah->ani;
        struct ath9k_ani_default *iniDef;
        u32 val;
 
@@ -1251,7 +1251,7 @@ static void ar5008_hw_ani_cache_ini_regs(struct ath_hw *ah)
        /* these levels just got reset to defaults by the INI */
        aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
        aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
-       aniState->ofdmWeakSigDetect = ATH9K_ANI_USE_OFDM_WEAK_SIG;
+       aniState->ofdmWeakSigDetect = true;
        aniState->mrcCCK = false; /* not available on pre AR9003 */
 }
 
index 830daa12feb6d50bcd49783ca440446e304564f0..8dc2d089cdef642c7b92765a68f50f1a0b50292b 100644 (file)
@@ -38,10 +38,6 @@ static int ar9002_hw_init_mode_regs(struct ath_hw *ah)
        else
                INIT_INI_ARRAY(&ah->iniPcieSerdes,
                           ar9280PciePhy_clkreq_always_on_L1_9280);
-#ifdef CONFIG_PM_SLEEP
-               INIT_INI_ARRAY(&ah->iniPcieSerdesWow,
-                              ar9280PciePhy_awow);
-#endif
 
        if (AR_SREV_9287_11_OR_LATER(ah)) {
                INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_1);
index beb6162cf97cbc36e39b3611c9d2cb063f04a3d3..4d18c66a6790362483be6a3ab4be86332826686a 100644 (file)
@@ -925,20 +925,6 @@ static const u32 ar9280PciePhy_clkreq_always_on_L1_9280[][2] = {
        {0x00004044, 0x00000000},
 };
 
-static const u32 ar9280PciePhy_awow[][2] = {
-       /* Addr      allmodes  */
-       {0x00004040, 0x9248fd00},
-       {0x00004040, 0x24924924},
-       {0x00004040, 0xa8000019},
-       {0x00004040, 0x13160820},
-       {0x00004040, 0xe5980560},
-       {0x00004040, 0xc01dcffd},
-       {0x00004040, 0x1aaabe41},
-       {0x00004040, 0xbe105554},
-       {0x00004040, 0x00043007},
-       {0x00004044, 0x00000000},
-};
-
 static const u32 ar9285Modes_9285_1_2[][5] = {
        /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
        {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
index e6b92ff265fd6e878274a2d0b35660a5bfa9a1c5..d105e43d22e165bc632d9fd41f9edabd04afaa8b 100644 (file)
@@ -3563,14 +3563,24 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
 {
        struct ath9k_hw_capabilities *pCap = &ah->caps;
        int chain;
-       u32 regval;
+       u32 regval, value, gpio;
        static const u32 switch_chain_reg[AR9300_MAX_CHAINS] = {
                        AR_PHY_SWITCH_CHAIN_0,
                        AR_PHY_SWITCH_CHAIN_1,
                        AR_PHY_SWITCH_CHAIN_2,
        };
 
-       u32 value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
+       if (AR_SREV_9485(ah) && (ar9003_hw_get_rx_gain_idx(ah) == 0)) {
+               if (ah->config.xlna_gpio)
+                       gpio = ah->config.xlna_gpio;
+               else
+                       gpio = AR9300_EXT_LNA_CTL_GPIO_AR9485;
+
+               ath9k_hw_cfg_output(ah, gpio,
+                                   AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED);
+       }
+
+       value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
 
        if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
                REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
@@ -3596,7 +3606,7 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
         *   7:4 R/W  SWITCH_TABLE_COM_SPDT_WLAN_IDLE
         * SWITCH_TABLE_COM_SPDT_WLAN_IDLE
         */
-       if (AR_SREV_9462_20(ah) || AR_SREV_9565(ah)) {
+       if (AR_SREV_9462_20_OR_LATER(ah) || AR_SREV_9565(ah)) {
                value = ar9003_switch_com_spdt_get(ah, is2ghz);
                REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL,
                                AR_SWITCH_TABLE_COM_SPDT_ALL, value);
@@ -3796,7 +3806,13 @@ static void ar9003_hw_atten_apply(struct ath_hw *ah, struct ath9k_channel *chan)
                        REG_RMW_FIELD(ah, ext_atten_reg[i],
                                      AR_PHY_EXT_ATTEN_CTL_XATTEN1_DB, value);
 
-                       value = ar9003_hw_atten_chain_get_margin(ah, i, chan);
+                       if (AR_SREV_9485(ah) &&
+                           (ar9003_hw_get_rx_gain_idx(ah) == 0) &&
+                           ah->config.xatten_margin_cfg)
+                               value = 5;
+                       else
+                               value = ar9003_hw_atten_chain_get_margin(ah, i, chan);
+
                        REG_RMW_FIELD(ah, ext_atten_reg[i],
                                      AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN,
                                      value);
@@ -4043,8 +4059,9 @@ static void ar9003_hw_thermo_cal_apply(struct ath_hw *ah)
 {
        u32 data, ko, kg;
 
-       if (!AR_SREV_9462_20(ah))
+       if (!AR_SREV_9462_20_OR_LATER(ah))
                return;
+
        ar9300_otp_read_word(ah, 1, &data);
        ko = data & 0xff;
        kg = (data >> 8) & 0xff;
@@ -4546,7 +4563,7 @@ static void ar9003_hw_get_target_power_eeprom(struct ath_hw *ah,
                                                 is2GHz);
 
        for (i = 0; i < ar9300RateSize; i++) {
-               ath_dbg(common, EEPROM, "TPC[%02d] 0x%08x\n",
+               ath_dbg(common, REGULATORY, "TPC[%02d] 0x%08x\n",
                        i, targetPowerValT2[i]);
        }
 }
@@ -4736,7 +4753,7 @@ tempslope:
                              AR_PHY_TPC_19_ALPHA_THERM, temp_slope);
        }
 
-       if (AR_SREV_9462_20(ah))
+       if (AR_SREV_9462_20_OR_LATER(ah))
                REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
                              AR_PHY_TPC_19_B1_ALPHA_THERM, temp_slope);
 
@@ -5272,7 +5289,7 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
                return;
 
        for (i = 0; i < ar9300RateSize; i++) {
-               ath_dbg(common, EEPROM, "TPC[%02d] 0x%08x\n",
+               ath_dbg(common, REGULATORY, "TPC[%02d] 0x%08x\n",
                        i, targetPowerValT2[i]);
        }
 
index a3523c969a3ace778b3daa4aabd2ac2d85761390..3101c399bc33db9534499fc2771b2bcf1cb05386 100644 (file)
@@ -24,6 +24,7 @@
 #include "ar955x_1p0_initvals.h"
 #include "ar9580_1p0_initvals.h"
 #include "ar9462_2p0_initvals.h"
+#include "ar9462_2p1_initvals.h"
 #include "ar9565_1p0_initvals.h"
 
 /* General hardware code for the AR9003 hadware family */
@@ -197,6 +198,31 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
 
                INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
                                ar9485_1_1_pcie_phy_clkreq_disable_L1);
+       } else if (AR_SREV_9462_21(ah)) {
+               INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+                              ar9462_2p1_mac_core);
+               INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+                              ar9462_2p1_mac_postamble);
+               INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+                              ar9462_2p1_baseband_core);
+               INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+                              ar9462_2p1_baseband_postamble);
+               INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+                              ar9462_2p1_radio_core);
+               INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+                              ar9462_2p1_radio_postamble);
+               INIT_INI_ARRAY(&ah->ini_radio_post_sys2ant,
+                              ar9462_2p1_radio_postamble_sys2ant);
+               INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+                              ar9462_2p1_soc_preamble);
+               INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
+                              ar9462_2p1_soc_postamble);
+               INIT_INI_ARRAY(&ah->iniModesRxGain,
+                              ar9462_2p1_common_rx_gain);
+               INIT_INI_ARRAY(&ah->iniModesFastClock,
+                              ar9462_2p1_modes_fast_clock);
+               INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+                              ar9462_2p1_baseband_core_txfir_coeff_japan_2484);
        } else if (AR_SREV_9462_20(ah)) {
 
                INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], ar9462_2p0_mac_core);
@@ -407,6 +433,9 @@ static void ar9003_tx_gain_table_mode0(struct ath_hw *ah)
        else if (AR_SREV_9580(ah))
                INIT_INI_ARRAY(&ah->iniModesTxGain,
                        ar9580_1p0_lowest_ob_db_tx_gain_table);
+       else if (AR_SREV_9462_21(ah))
+               INIT_INI_ARRAY(&ah->iniModesTxGain,
+                       ar9462_2p1_modes_low_ob_db_tx_gain);
        else if (AR_SREV_9462_20(ah))
                INIT_INI_ARRAY(&ah->iniModesTxGain,
                        ar9462_modes_low_ob_db_tx_gain_table_2p0);
@@ -438,6 +467,9 @@ static void ar9003_tx_gain_table_mode1(struct ath_hw *ah)
        else if (AR_SREV_9550(ah))
                INIT_INI_ARRAY(&ah->iniModesTxGain,
                        ar955x_1p0_modes_no_xpa_tx_gain_table);
+       else if (AR_SREV_9462_21(ah))
+               INIT_INI_ARRAY(&ah->iniModesTxGain,
+                       ar9462_2p1_modes_high_ob_db_tx_gain);
        else if (AR_SREV_9462_20(ah))
                INIT_INI_ARRAY(&ah->iniModesTxGain,
                        ar9462_modes_high_ob_db_tx_gain_table_2p0);
@@ -507,6 +539,9 @@ static void ar9003_tx_gain_table_mode4(struct ath_hw *ah)
        else if (AR_SREV_9580(ah))
                INIT_INI_ARRAY(&ah->iniModesTxGain,
                        ar9580_1p0_mixed_ob_db_tx_gain_table);
+       else if (AR_SREV_9462_21(ah))
+               INIT_INI_ARRAY(&ah->iniModesTxGain,
+                      ar9462_2p1_modes_mix_ob_db_tx_gain);
        else
                INIT_INI_ARRAY(&ah->iniModesTxGain,
                        ar9300Modes_mixed_ob_db_tx_gain_table_2p2);
@@ -584,6 +619,9 @@ static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
        } else if (AR_SREV_9580(ah))
                INIT_INI_ARRAY(&ah->iniModesRxGain,
                                ar9580_1p0_rx_gain_table);
+       else if (AR_SREV_9462_21(ah))
+               INIT_INI_ARRAY(&ah->iniModesRxGain,
+                               ar9462_2p1_common_rx_gain);
        else if (AR_SREV_9462_20(ah))
                INIT_INI_ARRAY(&ah->iniModesRxGain,
                                ar9462_common_rx_gain_table_2p0);
@@ -606,6 +644,9 @@ static void ar9003_rx_gain_table_mode1(struct ath_hw *ah)
        else if (AR_SREV_9485_11(ah))
                INIT_INI_ARRAY(&ah->iniModesRxGain,
                        ar9485Common_wo_xlna_rx_gain_1_1);
+       else if (AR_SREV_9462_21(ah))
+               INIT_INI_ARRAY(&ah->iniModesRxGain,
+                       ar9462_2p1_common_wo_xlna_rx_gain);
        else if (AR_SREV_9462_20(ah))
                INIT_INI_ARRAY(&ah->iniModesRxGain,
                        ar9462_common_wo_xlna_rx_gain_table_2p0);
@@ -627,9 +668,40 @@ static void ar9003_rx_gain_table_mode1(struct ath_hw *ah)
 
 static void ar9003_rx_gain_table_mode2(struct ath_hw *ah)
 {
-       if (AR_SREV_9462_20(ah))
+       if (AR_SREV_9462_21(ah)) {
+               INIT_INI_ARRAY(&ah->iniModesRxGain,
+                              ar9462_2p1_common_mixed_rx_gain);
+               INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_core,
+                              ar9462_2p1_baseband_core_mix_rxgain);
+               INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_postamble,
+                              ar9462_2p1_baseband_postamble_mix_rxgain);
+               INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+                              ar9462_2p1_baseband_postamble_5g_xlna);
+       } else if (AR_SREV_9462_20(ah)) {
                INIT_INI_ARRAY(&ah->iniModesRxGain,
                               ar9462_common_mixed_rx_gain_table_2p0);
+               INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_core,
+                              ar9462_2p0_baseband_core_mix_rxgain);
+               INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_postamble,
+                              ar9462_2p0_baseband_postamble_mix_rxgain);
+               INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+                              ar9462_2p0_baseband_postamble_5g_xlna);
+       }
+}
+
+static void ar9003_rx_gain_table_mode3(struct ath_hw *ah)
+{
+       if (AR_SREV_9462_21(ah)) {
+               INIT_INI_ARRAY(&ah->iniModesRxGain,
+                              ar9462_2p1_common_5g_xlna_only_rx_gain);
+               INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+                              ar9462_2p1_baseband_postamble_5g_xlna);
+       } else if (AR_SREV_9462_20(ah)) {
+               INIT_INI_ARRAY(&ah->iniModesRxGain,
+                              ar9462_2p0_5g_xlna_only_rxgain);
+               INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+                              ar9462_2p0_baseband_postamble_5g_xlna);
+       }
 }
 
 static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
@@ -645,6 +717,9 @@ static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
        case 2:
                ar9003_rx_gain_table_mode2(ah);
                break;
+       case 3:
+               ar9003_rx_gain_table_mode3(ah);
+               break;
        }
 }
 
index 301bf72c53bf5ce3de62825d87f025b0a5fb82e5..5163abd3937c85e3032b3ecd7719d72451e66f18 100644 (file)
@@ -469,6 +469,7 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
 
        rxs->rs_status = 0;
        rxs->rs_flags =  0;
+       rxs->flag =  0;
 
        rxs->rs_datalen = rxsp->status2 & AR_DataLen;
        rxs->rs_tstamp =  rxsp->status3;
@@ -493,8 +494,8 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
        rxs->rs_isaggr = (rxsp->status11 & AR_RxAggr) ? 1 : 0;
        rxs->rs_moreaggr = (rxsp->status11 & AR_RxMoreAggr) ? 1 : 0;
        rxs->rs_antenna = (MS(rxsp->status4, AR_RxAntenna) & 0x7);
-       rxs->rs_flags  = (rxsp->status4 & AR_GI) ? ATH9K_RX_GI : 0;
-       rxs->rs_flags  |= (rxsp->status4 & AR_2040) ? ATH9K_RX_2040 : 0;
+       rxs->flag  |= (rxsp->status4 & AR_GI) ? RX_FLAG_SHORT_GI : 0;
+       rxs->flag  |= (rxsp->status4 & AR_2040) ? RX_FLAG_40MHZ : 0;
 
        rxs->evm0 = rxsp->status6;
        rxs->evm1 = rxsp->status7;
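
The MAC hunk above starts reporting guard-interval and channel-width information through mac80211's own RX_FLAG_* bits instead of the private ATH9K_RX_* flags, and clears rxs->flag up front because the flags are OR'ed in and the status structure may carry bits from a previous frame. The essential shape of that pattern, mirroring the lines changed above:

/* Mirror of the pattern above: clear the flag word, then OR in the
 * per-frame mac80211 RX flags decoded from the descriptor. */
static void fill_rx_flags_sketch(struct ath_rx_status *rxs, u32 status4)
{
	rxs->flag = 0;					/* no stale bits */
	rxs->flag |= (status4 & AR_GI) ? RX_FLAG_SHORT_GI : 0;
	rxs->flag |= (status4 & AR_2040) ? RX_FLAG_40MHZ : 0;
}
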
index 09c1f9da67a05d9bbc1f715b61f31ee62bb23f00..6343cc91953e8090e753a7e26e74cf5339ee6b1c 100644 (file)
@@ -454,6 +454,8 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain)
                if (accum_cnt <= thresh_accum_cnt)
                        continue;
 
+               max_index++;
+
                /* sum(tx amplitude) */
                accum_tx = ((data_L[i] >> 16) & 0xffff) |
                    ((data_U[i] & 0x7ff) << 16);
@@ -468,20 +470,21 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain)
 
                accum_tx <<= scale_factor;
                accum_rx <<= scale_factor;
-               x_est[i + 1] = (((accum_tx + accum_cnt) / accum_cnt) + 32) >>
-                   scale_factor;
+               x_est[max_index] =
+                       (((accum_tx + accum_cnt) / accum_cnt) + 32) >>
+                       scale_factor;
 
-               Y[i + 1] = ((((accum_rx + accum_cnt) / accum_cnt) + 32) >>
+               Y[max_index] =
+                       ((((accum_rx + accum_cnt) / accum_cnt) + 32) >>
                            scale_factor) +
-                           (1 << scale_factor) * max_index + 16;
+                       (1 << scale_factor) * i + 16;
 
                if (accum_ang >= (1 << 26))
                        accum_ang -= 1 << 27;
 
-               theta[i + 1] = ((accum_ang * (1 << scale_factor)) + accum_cnt) /
-                   accum_cnt;
-
-               max_index++;
+               theta[max_index] =
+                       ((accum_ang * (1 << scale_factor)) + accum_cnt) /
+                       accum_cnt;
        }
 
        /*
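
The PAPRD fix above changes the indexing while building the PA curve: bins whose accumulation count is at or below the threshold are skipped, accepted bins are written at max_index (incremented up front) so the x_est/Y/theta arrays stay densely packed, and the (1 << scale_factor) offset in Y now uses the original bin index i rather than the compacted one. The underlying compaction pattern, reduced to its essentials:

#include <linux/types.h>

/* Keep only samples above a threshold and pack them densely at the
 * front of the output array, as the corrected indexing above does. */
static int compact_samples_sketch(const u32 *in, int n_in, u32 thresh,
				  u32 *out, int n_out)
{
	int i, n_kept = 0;

	for (i = 0; i < n_in && n_kept < n_out; i++) {
		if (in[i] <= thresh)
			continue;	/* skip under-populated bins */

		out[n_kept++] = in[i];	/* next free slot, not slot i */
	}

	return n_kept;			/* plays the role of max_index */
}
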
index e1714d7c9eeb50d0c546a9ea01ad213a8bd51215..1f694ab3cc78ba2ede3f7ca4b192c2e7dded96ab 100644 (file)
@@ -735,22 +735,53 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
                return -EINVAL;
        }
 
+       /*
+        * SOC, MAC, BB, RADIO initvals.
+        */
        for (i = 0; i < ATH_INI_NUM_SPLIT; i++) {
                ar9003_hw_prog_ini(ah, &ah->iniSOC[i], modesIndex);
                ar9003_hw_prog_ini(ah, &ah->iniMac[i], modesIndex);
                ar9003_hw_prog_ini(ah, &ah->iniBB[i], modesIndex);
                ar9003_hw_prog_ini(ah, &ah->iniRadio[i], modesIndex);
-               if (i == ATH_INI_POST && AR_SREV_9462_20(ah))
+               if (i == ATH_INI_POST && AR_SREV_9462_20_OR_LATER(ah))
                        ar9003_hw_prog_ini(ah,
                                           &ah->ini_radio_post_sys2ant,
                                           modesIndex);
        }
 
+       /*
+        * RXGAIN initvals.
+        */
        REG_WRITE_ARRAY(&ah->iniModesRxGain, 1, regWrites);
+
+       if (AR_SREV_9462_20_OR_LATER(ah)) {
+               /*
+                * CUS217 mix LNA mode.
+                */
+               if (ar9003_hw_get_rx_gain_idx(ah) == 2) {
+                       REG_WRITE_ARRAY(&ah->ini_modes_rxgain_bb_core,
+                                       1, regWrites);
+                       REG_WRITE_ARRAY(&ah->ini_modes_rxgain_bb_postamble,
+                                       modesIndex, regWrites);
+               }
+
+               /*
+                * 5G-XLNA
+                */
+               if ((ar9003_hw_get_rx_gain_idx(ah) == 2) ||
+                   (ar9003_hw_get_rx_gain_idx(ah) == 3)) {
+                       REG_WRITE_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+                                       modesIndex, regWrites);
+               }
+       }
+
        if (AR_SREV_9550(ah))
                REG_WRITE_ARRAY(&ah->ini_modes_rx_gain_bounds, modesIndex,
                                regWrites);
 
+       /*
+        * TXGAIN initvals.
+        */
        if (AR_SREV_9550(ah)) {
                int modes_txgain_index;
 
@@ -772,8 +803,14 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
                REG_WRITE_ARRAY(&ah->iniModesFastClock,
                                modesIndex, regWrites);
 
+       /*
+        * Clock frequency initvals.
+        */
        REG_WRITE_ARRAY(&ah->iniAdditional, 1, regWrites);
 
+       /*
+        * JAPAN regulatory.
+        */
        if (chan->channel == 2484)
                ar9003_hw_prog_ini(ah, &ah->iniCckfirJapan2484, 1);
 
@@ -905,7 +942,12 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
 {
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_channel *chan = ah->curchan;
-       struct ar5416AniState *aniState = &chan->ani;
+       struct ar5416AniState *aniState = &ah->ani;
+       int m1ThreshLow, m2ThreshLow;
+       int m1Thresh, m2Thresh;
+       int m2CountThr, m2CountThrLow;
+       int m1ThreshLowExt, m2ThreshLowExt;
+       int m1ThreshExt, m2ThreshExt;
        s32 value, value2;
 
        switch (cmd & ah->ani_function) {
@@ -919,6 +961,61 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
                 */
                u32 on = param ? 1 : 0;
 
+               if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
+                       goto skip_ws_det;
+
+               m1ThreshLow = on ?
+                       aniState->iniDef.m1ThreshLow : m1ThreshLow_off;
+               m2ThreshLow = on ?
+                       aniState->iniDef.m2ThreshLow : m2ThreshLow_off;
+               m1Thresh = on ?
+                       aniState->iniDef.m1Thresh : m1Thresh_off;
+               m2Thresh = on ?
+                       aniState->iniDef.m2Thresh : m2Thresh_off;
+               m2CountThr = on ?
+                       aniState->iniDef.m2CountThr : m2CountThr_off;
+               m2CountThrLow = on ?
+                       aniState->iniDef.m2CountThrLow : m2CountThrLow_off;
+               m1ThreshLowExt = on ?
+                       aniState->iniDef.m1ThreshLowExt : m1ThreshLowExt_off;
+               m2ThreshLowExt = on ?
+                       aniState->iniDef.m2ThreshLowExt : m2ThreshLowExt_off;
+               m1ThreshExt = on ?
+                       aniState->iniDef.m1ThreshExt : m1ThreshExt_off;
+               m2ThreshExt = on ?
+                       aniState->iniDef.m2ThreshExt : m2ThreshExt_off;
+
+               REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+                             AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
+                             m1ThreshLow);
+               REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+                             AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
+                             m2ThreshLow);
+               REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+                             AR_PHY_SFCORR_M1_THRESH,
+                             m1Thresh);
+               REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+                             AR_PHY_SFCORR_M2_THRESH,
+                             m2Thresh);
+               REG_RMW_FIELD(ah, AR_PHY_SFCORR,
+                             AR_PHY_SFCORR_M2COUNT_THR,
+                             m2CountThr);
+               REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
+                             AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
+                             m2CountThrLow);
+               REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+                             AR_PHY_SFCORR_EXT_M1_THRESH_LOW,
+                             m1ThreshLowExt);
+               REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+                             AR_PHY_SFCORR_EXT_M2_THRESH_LOW,
+                             m2ThreshLowExt);
+               REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+                             AR_PHY_SFCORR_EXT_M1_THRESH,
+                             m1ThreshExt);
+               REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
+                             AR_PHY_SFCORR_EXT_M2_THRESH,
+                             m2ThreshExt);
+skip_ws_det:
                if (on)
                        REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
                                    AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
@@ -1173,7 +1270,7 @@ static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah)
        struct ath9k_ani_default *iniDef;
        u32 val;
 
-       aniState = &ah->curchan->ani;
+       aniState = &ah->ani;
        iniDef = &aniState->iniDef;
 
        ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
@@ -1214,7 +1311,7 @@ static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah)
        /* these levels just got reset to defaults by the INI */
        aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
        aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
-       aniState->ofdmWeakSigDetect = ATH9K_ANI_USE_OFDM_WEAK_SIG;
+       aniState->ofdmWeakSigDetect = true;
        aniState->mrcCCK = true;
 }
 
@@ -1415,7 +1512,7 @@ static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
        ar9003_hw_prog_ini(ah, &ah->iniBB[ATH_INI_POST], modesIndex);
        ar9003_hw_prog_ini(ah, &ah->iniRadio[ATH_INI_POST], modesIndex);
 
-       if (AR_SREV_9462_20(ah))
+       if (AR_SREV_9462_20_OR_LATER(ah))
                ar9003_hw_prog_ini(ah, &ah->ini_radio_post_sys2ant,
                                   modesIndex);
 
index e71774196c01bbf7fa6f3a33b1789556fc63ae9a..d4d39f305a0b0177fbb7289a360f6d75849a58ea 100644
 
 #define AR_PHY_CCA_NOM_VAL_9330_2GHZ          -118
 
+#define AR9300_EXT_LNA_CTL_GPIO_AR9485 9
+
 /*
  * AGC Field Definitions
  */
 #define AR_PHY_TPC_5_B1         (AR_SM1_BASE + 0x208)
 #define AR_PHY_TPC_6_B1         (AR_SM1_BASE + 0x20c)
 #define AR_PHY_TPC_11_B1        (AR_SM1_BASE + 0x220)
-#define AR_PHY_PDADC_TAB_1     (AR_SM1_BASE + (AR_SREV_AR9462(ah) ? \
+#define AR_PHY_PDADC_TAB_1     (AR_SM1_BASE + (AR_SREV_9462_20_OR_LATER(ah) ? \
                                        0x280 : 0x240))
 #define AR_PHY_TPC_19_B1       (AR_SM1_BASE + 0x240)
 #define AR_PHY_TPC_19_B1_ALPHA_THERM           0xff
 #define AR_GLB_GPIO_CONTROL    (AR_GLB_BASE)
 #define AR_PHY_GLB_CONTROL     (AR_GLB_BASE + 0x44)
 #define AR_GLB_SCRATCH(_ah)    (AR_GLB_BASE + \
-                                       (AR_SREV_9462_20(_ah) ? 0x4c : 0x50))
+                                       (AR_SREV_9462_20_OR_LATER(_ah) ? 0x4c : 0x50))
 #define AR_GLB_STATUS          (AR_GLB_BASE + 0x48)
 
 /*
index 999ab08c34e689fd9179b0335bc475bbd46bdfbe..1d6b705ba110f38bfb93b05bd402cf89b45fcc75 100644
@@ -78,7 +78,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
        {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
        {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
        {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
-       {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
+       {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
        {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
        {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
        {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -1449,4 +1449,284 @@ static const u32 ar9462_common_mixed_rx_gain_table_2p0[][2] = {
        {0x0000b1fc, 0x00000196},
 };
 
+static const u32 ar9462_2p0_baseband_postamble_5g_xlna[][5] = {
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
+};
+
+static const u32 ar9462_2p0_5g_xlna_only_rxgain[][2] = {
+       /* Addr      allmodes  */
+       {0x0000a000, 0x00010000},
+       {0x0000a004, 0x00030002},
+       {0x0000a008, 0x00050004},
+       {0x0000a00c, 0x00810080},
+       {0x0000a010, 0x00830082},
+       {0x0000a014, 0x01810180},
+       {0x0000a018, 0x01830182},
+       {0x0000a01c, 0x01850184},
+       {0x0000a020, 0x01890188},
+       {0x0000a024, 0x018b018a},
+       {0x0000a028, 0x018d018c},
+       {0x0000a02c, 0x03820190},
+       {0x0000a030, 0x03840383},
+       {0x0000a034, 0x03880385},
+       {0x0000a038, 0x038a0389},
+       {0x0000a03c, 0x038c038b},
+       {0x0000a040, 0x0390038d},
+       {0x0000a044, 0x03920391},
+       {0x0000a048, 0x03940393},
+       {0x0000a04c, 0x03960395},
+       {0x0000a050, 0x00000000},
+       {0x0000a054, 0x00000000},
+       {0x0000a058, 0x00000000},
+       {0x0000a05c, 0x00000000},
+       {0x0000a060, 0x00000000},
+       {0x0000a064, 0x00000000},
+       {0x0000a068, 0x00000000},
+       {0x0000a06c, 0x00000000},
+       {0x0000a070, 0x00000000},
+       {0x0000a074, 0x00000000},
+       {0x0000a078, 0x00000000},
+       {0x0000a07c, 0x00000000},
+       {0x0000a080, 0x29292929},
+       {0x0000a084, 0x29292929},
+       {0x0000a088, 0x29292929},
+       {0x0000a08c, 0x29292929},
+       {0x0000a090, 0x22292929},
+       {0x0000a094, 0x1d1d2222},
+       {0x0000a098, 0x0c111117},
+       {0x0000a09c, 0x00030303},
+       {0x0000a0a0, 0x00000000},
+       {0x0000a0a4, 0x00000000},
+       {0x0000a0a8, 0x00000000},
+       {0x0000a0ac, 0x00000000},
+       {0x0000a0b0, 0x00000000},
+       {0x0000a0b4, 0x00000000},
+       {0x0000a0b8, 0x00000000},
+       {0x0000a0bc, 0x00000000},
+       {0x0000a0c0, 0x001f0000},
+       {0x0000a0c4, 0x01000101},
+       {0x0000a0c8, 0x011e011f},
+       {0x0000a0cc, 0x011c011d},
+       {0x0000a0d0, 0x02030204},
+       {0x0000a0d4, 0x02010202},
+       {0x0000a0d8, 0x021f0200},
+       {0x0000a0dc, 0x0302021e},
+       {0x0000a0e0, 0x03000301},
+       {0x0000a0e4, 0x031e031f},
+       {0x0000a0e8, 0x0402031d},
+       {0x0000a0ec, 0x04000401},
+       {0x0000a0f0, 0x041e041f},
+       {0x0000a0f4, 0x0502041d},
+       {0x0000a0f8, 0x05000501},
+       {0x0000a0fc, 0x051e051f},
+       {0x0000a100, 0x06010602},
+       {0x0000a104, 0x061f0600},
+       {0x0000a108, 0x061d061e},
+       {0x0000a10c, 0x07020703},
+       {0x0000a110, 0x07000701},
+       {0x0000a114, 0x00000000},
+       {0x0000a118, 0x00000000},
+       {0x0000a11c, 0x00000000},
+       {0x0000a120, 0x00000000},
+       {0x0000a124, 0x00000000},
+       {0x0000a128, 0x00000000},
+       {0x0000a12c, 0x00000000},
+       {0x0000a130, 0x00000000},
+       {0x0000a134, 0x00000000},
+       {0x0000a138, 0x00000000},
+       {0x0000a13c, 0x00000000},
+       {0x0000a140, 0x001f0000},
+       {0x0000a144, 0x01000101},
+       {0x0000a148, 0x011e011f},
+       {0x0000a14c, 0x011c011d},
+       {0x0000a150, 0x02030204},
+       {0x0000a154, 0x02010202},
+       {0x0000a158, 0x021f0200},
+       {0x0000a15c, 0x0302021e},
+       {0x0000a160, 0x03000301},
+       {0x0000a164, 0x031e031f},
+       {0x0000a168, 0x0402031d},
+       {0x0000a16c, 0x04000401},
+       {0x0000a170, 0x041e041f},
+       {0x0000a174, 0x0502041d},
+       {0x0000a178, 0x05000501},
+       {0x0000a17c, 0x051e051f},
+       {0x0000a180, 0x06010602},
+       {0x0000a184, 0x061f0600},
+       {0x0000a188, 0x061d061e},
+       {0x0000a18c, 0x07020703},
+       {0x0000a190, 0x07000701},
+       {0x0000a194, 0x00000000},
+       {0x0000a198, 0x00000000},
+       {0x0000a19c, 0x00000000},
+       {0x0000a1a0, 0x00000000},
+       {0x0000a1a4, 0x00000000},
+       {0x0000a1a8, 0x00000000},
+       {0x0000a1ac, 0x00000000},
+       {0x0000a1b0, 0x00000000},
+       {0x0000a1b4, 0x00000000},
+       {0x0000a1b8, 0x00000000},
+       {0x0000a1bc, 0x00000000},
+       {0x0000a1c0, 0x00000000},
+       {0x0000a1c4, 0x00000000},
+       {0x0000a1c8, 0x00000000},
+       {0x0000a1cc, 0x00000000},
+       {0x0000a1d0, 0x00000000},
+       {0x0000a1d4, 0x00000000},
+       {0x0000a1d8, 0x00000000},
+       {0x0000a1dc, 0x00000000},
+       {0x0000a1e0, 0x00000000},
+       {0x0000a1e4, 0x00000000},
+       {0x0000a1e8, 0x00000000},
+       {0x0000a1ec, 0x00000000},
+       {0x0000a1f0, 0x00000396},
+       {0x0000a1f4, 0x00000396},
+       {0x0000a1f8, 0x00000396},
+       {0x0000a1fc, 0x00000196},
+       {0x0000b000, 0x00010000},
+       {0x0000b004, 0x00030002},
+       {0x0000b008, 0x00050004},
+       {0x0000b00c, 0x00810080},
+       {0x0000b010, 0x00830082},
+       {0x0000b014, 0x01810180},
+       {0x0000b018, 0x01830182},
+       {0x0000b01c, 0x01850184},
+       {0x0000b020, 0x02810280},
+       {0x0000b024, 0x02830282},
+       {0x0000b028, 0x02850284},
+       {0x0000b02c, 0x02890288},
+       {0x0000b030, 0x028b028a},
+       {0x0000b034, 0x0388028c},
+       {0x0000b038, 0x038a0389},
+       {0x0000b03c, 0x038c038b},
+       {0x0000b040, 0x0390038d},
+       {0x0000b044, 0x03920391},
+       {0x0000b048, 0x03940393},
+       {0x0000b04c, 0x03960395},
+       {0x0000b050, 0x00000000},
+       {0x0000b054, 0x00000000},
+       {0x0000b058, 0x00000000},
+       {0x0000b05c, 0x00000000},
+       {0x0000b060, 0x00000000},
+       {0x0000b064, 0x00000000},
+       {0x0000b068, 0x00000000},
+       {0x0000b06c, 0x00000000},
+       {0x0000b070, 0x00000000},
+       {0x0000b074, 0x00000000},
+       {0x0000b078, 0x00000000},
+       {0x0000b07c, 0x00000000},
+       {0x0000b080, 0x2a2d2f32},
+       {0x0000b084, 0x21232328},
+       {0x0000b088, 0x19191c1e},
+       {0x0000b08c, 0x12141417},
+       {0x0000b090, 0x07070e0e},
+       {0x0000b094, 0x03030305},
+       {0x0000b098, 0x00000003},
+       {0x0000b09c, 0x00000000},
+       {0x0000b0a0, 0x00000000},
+       {0x0000b0a4, 0x00000000},
+       {0x0000b0a8, 0x00000000},
+       {0x0000b0ac, 0x00000000},
+       {0x0000b0b0, 0x00000000},
+       {0x0000b0b4, 0x00000000},
+       {0x0000b0b8, 0x00000000},
+       {0x0000b0bc, 0x00000000},
+       {0x0000b0c0, 0x003f0020},
+       {0x0000b0c4, 0x00400041},
+       {0x0000b0c8, 0x0140005f},
+       {0x0000b0cc, 0x0160015f},
+       {0x0000b0d0, 0x017e017f},
+       {0x0000b0d4, 0x02410242},
+       {0x0000b0d8, 0x025f0240},
+       {0x0000b0dc, 0x027f0260},
+       {0x0000b0e0, 0x0341027e},
+       {0x0000b0e4, 0x035f0340},
+       {0x0000b0e8, 0x037f0360},
+       {0x0000b0ec, 0x04400441},
+       {0x0000b0f0, 0x0460045f},
+       {0x0000b0f4, 0x0541047f},
+       {0x0000b0f8, 0x055f0540},
+       {0x0000b0fc, 0x057f0560},
+       {0x0000b100, 0x06400641},
+       {0x0000b104, 0x0660065f},
+       {0x0000b108, 0x067e067f},
+       {0x0000b10c, 0x07410742},
+       {0x0000b110, 0x075f0740},
+       {0x0000b114, 0x077f0760},
+       {0x0000b118, 0x07800781},
+       {0x0000b11c, 0x07a0079f},
+       {0x0000b120, 0x07c107bf},
+       {0x0000b124, 0x000007c0},
+       {0x0000b128, 0x00000000},
+       {0x0000b12c, 0x00000000},
+       {0x0000b130, 0x00000000},
+       {0x0000b134, 0x00000000},
+       {0x0000b138, 0x00000000},
+       {0x0000b13c, 0x00000000},
+       {0x0000b140, 0x003f0020},
+       {0x0000b144, 0x00400041},
+       {0x0000b148, 0x0140005f},
+       {0x0000b14c, 0x0160015f},
+       {0x0000b150, 0x017e017f},
+       {0x0000b154, 0x02410242},
+       {0x0000b158, 0x025f0240},
+       {0x0000b15c, 0x027f0260},
+       {0x0000b160, 0x0341027e},
+       {0x0000b164, 0x035f0340},
+       {0x0000b168, 0x037f0360},
+       {0x0000b16c, 0x04400441},
+       {0x0000b170, 0x0460045f},
+       {0x0000b174, 0x0541047f},
+       {0x0000b178, 0x055f0540},
+       {0x0000b17c, 0x057f0560},
+       {0x0000b180, 0x06400641},
+       {0x0000b184, 0x0660065f},
+       {0x0000b188, 0x067e067f},
+       {0x0000b18c, 0x07410742},
+       {0x0000b190, 0x075f0740},
+       {0x0000b194, 0x077f0760},
+       {0x0000b198, 0x07800781},
+       {0x0000b19c, 0x07a0079f},
+       {0x0000b1a0, 0x07c107bf},
+       {0x0000b1a4, 0x000007c0},
+       {0x0000b1a8, 0x00000000},
+       {0x0000b1ac, 0x00000000},
+       {0x0000b1b0, 0x00000000},
+       {0x0000b1b4, 0x00000000},
+       {0x0000b1b8, 0x00000000},
+       {0x0000b1bc, 0x00000000},
+       {0x0000b1c0, 0x00000000},
+       {0x0000b1c4, 0x00000000},
+       {0x0000b1c8, 0x00000000},
+       {0x0000b1cc, 0x00000000},
+       {0x0000b1d0, 0x00000000},
+       {0x0000b1d4, 0x00000000},
+       {0x0000b1d8, 0x00000000},
+       {0x0000b1dc, 0x00000000},
+       {0x0000b1e0, 0x00000000},
+       {0x0000b1e4, 0x00000000},
+       {0x0000b1e8, 0x00000000},
+       {0x0000b1ec, 0x00000000},
+       {0x0000b1f0, 0x00000396},
+       {0x0000b1f4, 0x00000396},
+       {0x0000b1f8, 0x00000396},
+       {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9462_2p0_baseband_core_mix_rxgain[][2] = {
+       /* Addr      allmodes  */
+       {0x00009fd0, 0x0a2d6b93},
+};
+
+static const u32 ar9462_2p0_baseband_postamble_mix_rxgain[][5] = {
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x00009820, 0x206a022e, 0x206a022e, 0x206a01ae, 0x206a01ae},
+       {0x00009824, 0x63c640de, 0x5ac640d0, 0x63c640da, 0x63c640da},
+       {0x00009828, 0x0796be89, 0x0696b081, 0x0916be81, 0x0916be81},
+       {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000d8, 0x6c4000d8},
+       {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec86d2e, 0x7ec86d2e},
+       {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32395c5e},
+};
+
 #endif /* INITVALS_9462_2P0_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h
new file mode 100644
index 0000000..4dbc294
--- /dev/null
@@ -0,0 +1,1774 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_9462_2P1_H
+#define INITVALS_9462_2P1_H
+
+/* AR9462 2.1 */
+
+static const u32 ar9462_2p1_mac_core[][2] = {
+       /* Addr      allmodes  */
+       {0x00000008, 0x00000000},
+       {0x00000030, 0x000e0085},
+       {0x00000034, 0x00000005},
+       {0x00000040, 0x00000000},
+       {0x00000044, 0x00000000},
+       {0x00000048, 0x00000008},
+       {0x0000004c, 0x00000010},
+       {0x00000050, 0x00000000},
+       {0x00001040, 0x002ffc0f},
+       {0x00001044, 0x002ffc0f},
+       {0x00001048, 0x002ffc0f},
+       {0x0000104c, 0x002ffc0f},
+       {0x00001050, 0x002ffc0f},
+       {0x00001054, 0x002ffc0f},
+       {0x00001058, 0x002ffc0f},
+       {0x0000105c, 0x002ffc0f},
+       {0x00001060, 0x002ffc0f},
+       {0x00001064, 0x002ffc0f},
+       {0x000010f0, 0x00000100},
+       {0x00001270, 0x00000000},
+       {0x000012b0, 0x00000000},
+       {0x000012f0, 0x00000000},
+       {0x0000143c, 0x00000000},
+       {0x0000147c, 0x00000000},
+       {0x00001810, 0x0f000003},
+       {0x00008000, 0x00000000},
+       {0x00008004, 0x00000000},
+       {0x00008008, 0x00000000},
+       {0x0000800c, 0x00000000},
+       {0x00008018, 0x00000000},
+       {0x00008020, 0x00000000},
+       {0x00008038, 0x00000000},
+       {0x0000803c, 0x00080000},
+       {0x00008040, 0x00000000},
+       {0x00008044, 0x00000000},
+       {0x00008048, 0x00000000},
+       {0x0000804c, 0xffffffff},
+       {0x00008054, 0x00000000},
+       {0x00008058, 0x00000000},
+       {0x0000805c, 0x000fc78f},
+       {0x00008060, 0x0000000f},
+       {0x00008064, 0x00000000},
+       {0x00008070, 0x00000310},
+       {0x00008074, 0x00000020},
+       {0x00008078, 0x00000000},
+       {0x0000809c, 0x0000000f},
+       {0x000080a0, 0x00000000},
+       {0x000080a4, 0x02ff0000},
+       {0x000080a8, 0x0e070605},
+       {0x000080ac, 0x0000000d},
+       {0x000080b0, 0x00000000},
+       {0x000080b4, 0x00000000},
+       {0x000080b8, 0x00000000},
+       {0x000080bc, 0x00000000},
+       {0x000080c0, 0x2a800000},
+       {0x000080c4, 0x06900168},
+       {0x000080c8, 0x13881c20},
+       {0x000080cc, 0x01f40000},
+       {0x000080d0, 0x00252500},
+       {0x000080d4, 0x00b00005},
+       {0x000080d8, 0x00400002},
+       {0x000080dc, 0x00000000},
+       {0x000080e0, 0xffffffff},
+       {0x000080e4, 0x0000ffff},
+       {0x000080e8, 0x3f3f3f3f},
+       {0x000080ec, 0x00000000},
+       {0x000080f0, 0x00000000},
+       {0x000080f4, 0x00000000},
+       {0x000080fc, 0x00020000},
+       {0x00008100, 0x00000000},
+       {0x00008108, 0x00000052},
+       {0x0000810c, 0x00000000},
+       {0x00008110, 0x00000000},
+       {0x00008114, 0x000007ff},
+       {0x00008118, 0x000000aa},
+       {0x0000811c, 0x00003210},
+       {0x00008124, 0x00000000},
+       {0x00008128, 0x00000000},
+       {0x0000812c, 0x00000000},
+       {0x00008130, 0x00000000},
+       {0x00008134, 0x00000000},
+       {0x00008138, 0x00000000},
+       {0x0000813c, 0x0000ffff},
+       {0x00008144, 0xffffffff},
+       {0x00008168, 0x00000000},
+       {0x0000816c, 0x00000000},
+       {0x00008170, 0x18486e00},
+       {0x00008174, 0x33332210},
+       {0x00008178, 0x00000000},
+       {0x0000817c, 0x00020000},
+       {0x000081c4, 0x33332210},
+       {0x000081c8, 0x00000000},
+       {0x000081cc, 0x00000000},
+       {0x000081d4, 0x00000000},
+       {0x000081ec, 0x00000000},
+       {0x000081f0, 0x00000000},
+       {0x000081f4, 0x00000000},
+       {0x000081f8, 0x00000000},
+       {0x000081fc, 0x00000000},
+       {0x00008240, 0x00100000},
+       {0x00008244, 0x0010f400},
+       {0x00008248, 0x00000800},
+       {0x0000824c, 0x0001e800},
+       {0x00008250, 0x00000000},
+       {0x00008254, 0x00000000},
+       {0x00008258, 0x00000000},
+       {0x0000825c, 0x40000000},
+       {0x00008260, 0x00080922},
+       {0x00008264, 0x99c00010},
+       {0x00008268, 0xffffffff},
+       {0x0000826c, 0x0000ffff},
+       {0x00008270, 0x00000000},
+       {0x00008274, 0x40000000},
+       {0x00008278, 0x003e4180},
+       {0x0000827c, 0x00000004},
+       {0x00008284, 0x0000002c},
+       {0x00008288, 0x0000002c},
+       {0x0000828c, 0x000000ff},
+       {0x00008294, 0x00000000},
+       {0x00008298, 0x00000000},
+       {0x0000829c, 0x00000000},
+       {0x00008300, 0x00000140},
+       {0x00008314, 0x00000000},
+       {0x0000831c, 0x0000010d},
+       {0x00008328, 0x00000000},
+       {0x0000832c, 0x0000001f},
+       {0x00008330, 0x00000302},
+       {0x00008334, 0x00000700},
+       {0x00008338, 0xffff0000},
+       {0x0000833c, 0x02400000},
+       {0x00008340, 0x000107ff},
+       {0x00008344, 0xaa48107b},
+       {0x00008348, 0x008f0000},
+       {0x0000835c, 0x00000000},
+       {0x00008360, 0xffffffff},
+       {0x00008364, 0xffffffff},
+       {0x00008368, 0x00000000},
+       {0x00008370, 0x00000000},
+       {0x00008374, 0x000000ff},
+       {0x00008378, 0x00000000},
+       {0x0000837c, 0x00000000},
+       {0x00008380, 0xffffffff},
+       {0x00008384, 0xffffffff},
+       {0x00008390, 0xffffffff},
+       {0x00008394, 0xffffffff},
+       {0x00008398, 0x00000000},
+       {0x0000839c, 0x00000000},
+       {0x000083a4, 0x0000fa14},
+       {0x000083a8, 0x000f0c00},
+       {0x000083ac, 0x33332210},
+       {0x000083b0, 0x33332210},
+       {0x000083b4, 0x33332210},
+       {0x000083b8, 0x33332210},
+       {0x000083bc, 0x00000000},
+       {0x000083c0, 0x00000000},
+       {0x000083c4, 0x00000000},
+       {0x000083c8, 0x00000000},
+       {0x000083cc, 0x00000200},
+       {0x000083d0, 0x000301ff},
+};
+
+static const u32 ar9462_2p1_mac_postamble[][5] = {
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+       {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+       {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+       {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+       {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+       {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+       {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+       {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
+};
+
+static const u32 ar9462_2p1_baseband_core[][2] = {
+       /* Addr      allmodes  */
+       {0x00009800, 0xafe68e30},
+       {0x00009804, 0xfd14e000},
+       {0x00009808, 0x9c0a9f6b},
+       {0x0000980c, 0x04900000},
+       {0x00009814, 0x9280c00a},
+       {0x00009818, 0x00000000},
+       {0x0000981c, 0x00020028},
+       {0x00009834, 0x6400a290},
+       {0x00009838, 0x0108ecff},
+       {0x0000983c, 0x0d000600},
+       {0x00009880, 0x201fff00},
+       {0x00009884, 0x00001042},
+       {0x000098a4, 0x00200400},
+       {0x000098b0, 0x32440bbe},
+       {0x000098d0, 0x004b6a8e},
+       {0x000098d4, 0x00000820},
+       {0x000098dc, 0x00000000},
+       {0x000098e4, 0x01ffffff},
+       {0x000098e8, 0x01ffffff},
+       {0x000098ec, 0x01ffffff},
+       {0x000098f0, 0x00000000},
+       {0x000098f4, 0x00000000},
+       {0x00009bf0, 0x80000000},
+       {0x00009c04, 0xff55ff55},
+       {0x00009c08, 0x0320ff55},
+       {0x00009c0c, 0x00000000},
+       {0x00009c10, 0x00000000},
+       {0x00009c14, 0x00046384},
+       {0x00009c18, 0x05b6b440},
+       {0x00009c1c, 0x00b6b440},
+       {0x00009d00, 0xc080a333},
+       {0x00009d04, 0x40206c10},
+       {0x00009d08, 0x009c4060},
+       {0x00009d0c, 0x9883800a},
+       {0x00009d10, 0x01834061},
+       {0x00009d14, 0x00c0040b},
+       {0x00009d18, 0x00000000},
+       {0x00009e08, 0x0038230c},
+       {0x00009e24, 0x990bb515},
+       {0x00009e28, 0x0c6f0000},
+       {0x00009e30, 0x06336f77},
+       {0x00009e34, 0x6af6532f},
+       {0x00009e38, 0x0cc80c00},
+       {0x00009e40, 0x15262820},
+       {0x00009e4c, 0x00001004},
+       {0x00009e50, 0x00ff03f1},
+       {0x00009e54, 0xe4c555c2},
+       {0x00009e58, 0xfd857722},
+       {0x00009e5c, 0xe9198724},
+       {0x00009fc0, 0x803e4788},
+       {0x00009fc4, 0x0001efb5},
+       {0x00009fcc, 0x40000014},
+       {0x00009fd0, 0x0a193b93},
+       {0x0000a20c, 0x00000000},
+       {0x0000a220, 0x00000000},
+       {0x0000a224, 0x00000000},
+       {0x0000a228, 0x10002310},
+       {0x0000a23c, 0x00000000},
+       {0x0000a244, 0x0c000000},
+       {0x0000a2a0, 0x00000001},
+       {0x0000a2c0, 0x00000001},
+       {0x0000a2c8, 0x00000000},
+       {0x0000a2cc, 0x18c43433},
+       {0x0000a2d4, 0x00000000},
+       {0x0000a2ec, 0x00000000},
+       {0x0000a2f0, 0x00000000},
+       {0x0000a2f4, 0x00000000},
+       {0x0000a2f8, 0x00000000},
+       {0x0000a344, 0x00000000},
+       {0x0000a34c, 0x00000000},
+       {0x0000a350, 0x0000a000},
+       {0x0000a364, 0x00000000},
+       {0x0000a370, 0x00000000},
+       {0x0000a390, 0x00000001},
+       {0x0000a394, 0x00000444},
+       {0x0000a398, 0x001f0e0f},
+       {0x0000a39c, 0x0075393f},
+       {0x0000a3a0, 0xb79f6427},
+       {0x0000a3c0, 0x20202020},
+       {0x0000a3c4, 0x22222220},
+       {0x0000a3c8, 0x20200020},
+       {0x0000a3cc, 0x20202020},
+       {0x0000a3d0, 0x20202020},
+       {0x0000a3d4, 0x20202020},
+       {0x0000a3d8, 0x20202020},
+       {0x0000a3dc, 0x20202020},
+       {0x0000a3e0, 0x20202020},
+       {0x0000a3e4, 0x20202020},
+       {0x0000a3e8, 0x20202020},
+       {0x0000a3ec, 0x20202020},
+       {0x0000a3f0, 0x00000000},
+       {0x0000a3f4, 0x00000006},
+       {0x0000a3f8, 0x0c9bd380},
+       {0x0000a3fc, 0x000f0f01},
+       {0x0000a400, 0x8fa91f01},
+       {0x0000a404, 0x00000000},
+       {0x0000a408, 0x0e79e5c6},
+       {0x0000a40c, 0x00820820},
+       {0x0000a414, 0x1ce739ce},
+       {0x0000a418, 0x2d001dce},
+       {0x0000a434, 0x00000000},
+       {0x0000a438, 0x00001801},
+       {0x0000a43c, 0x00100000},
+       {0x0000a444, 0x00000000},
+       {0x0000a448, 0x05000080},
+       {0x0000a44c, 0x00000001},
+       {0x0000a450, 0x00010000},
+       {0x0000a454, 0x07000000},
+       {0x0000a644, 0xbfad9d74},
+       {0x0000a648, 0x0048060a},
+       {0x0000a64c, 0x00002037},
+       {0x0000a670, 0x03020100},
+       {0x0000a674, 0x09080504},
+       {0x0000a678, 0x0d0c0b0a},
+       {0x0000a67c, 0x13121110},
+       {0x0000a680, 0x31301514},
+       {0x0000a684, 0x35343332},
+       {0x0000a688, 0x00000036},
+       {0x0000a690, 0x00000838},
+       {0x0000a6b0, 0x0000000a},
+       {0x0000a6b4, 0x00512c01},
+       {0x0000a7c0, 0x00000000},
+       {0x0000a7c4, 0xfffffffc},
+       {0x0000a7c8, 0x00000000},
+       {0x0000a7cc, 0x00000000},
+       {0x0000a7d0, 0x00000000},
+       {0x0000a7d4, 0x00000004},
+       {0x0000a7dc, 0x00000000},
+       {0x0000a7f0, 0x80000000},
+       {0x0000a8d0, 0x004b6a8e},
+       {0x0000a8d4, 0x00000820},
+       {0x0000a8dc, 0x00000000},
+       {0x0000a8f0, 0x00000000},
+       {0x0000a8f4, 0x00000000},
+       {0x0000abf0, 0x80000000},
+       {0x0000b2d0, 0x00000080},
+       {0x0000b2d4, 0x00000000},
+       {0x0000b2ec, 0x00000000},
+       {0x0000b2f0, 0x00000000},
+       {0x0000b2f4, 0x00000000},
+       {0x0000b2f8, 0x00000000},
+       {0x0000b408, 0x0e79e5c0},
+       {0x0000b40c, 0x00820820},
+       {0x0000b420, 0x00000000},
+       {0x0000b6b0, 0x0000000a},
+       {0x0000b6b4, 0x00000001},
+};
+
+static const u32 ar9462_2p1_baseband_postamble[][5] = {
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a800d},
+       {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae},
+       {0x00009824, 0x63c640de, 0x5ac640d0, 0x5ac640d0, 0x63c640da},
+       {0x00009828, 0x0796be89, 0x0696b081, 0x0696b881, 0x09143e81},
+       {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+       {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
+       {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
+       {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a2},
+       {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
+       {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8},
+       {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e},
+       {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32365a5e},
+       {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+       {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+       {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+       {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
+       {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
+       {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+       {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+       {0x0000a204, 0x01318fc0, 0x01318fc4, 0x01318fc4, 0x01318fc0},
+       {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+       {0x0000a22c, 0x01026a2f, 0x01026a27, 0x01026a2f, 0x01026a2f},
+       {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
+       {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
+       {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
+       {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+       {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+       {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+       {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+       {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
+       {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+       {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+       {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
+       {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
+       {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
+       {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+       {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
+       {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
+       {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a3a4, 0x00000050, 0x00000050, 0x00000000, 0x00000000},
+       {0x0000a3a8, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa},
+       {0x0000a3ac, 0xaaaaaa00, 0xaa30aa30, 0xaaaaaa00, 0xaaaaaa00},
+       {0x0000a41c, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
+       {0x0000a420, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce},
+       {0x0000a424, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
+       {0x0000a428, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce},
+       {0x0000a42c, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
+       {0x0000a430, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
+       {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+       {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000},
+       {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+       {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+       {0x0000b284, 0x00000000, 0x00000000, 0x00000550, 0x00000550},
+};
+
+static const u32 ar9462_2p1_radio_core[][2] = {
+       /* Addr      allmodes  */
+       {0x00016000, 0x36db6db6},
+       {0x00016004, 0x6db6db40},
+       {0x00016008, 0x73f00000},
+       {0x0001600c, 0x00000000},
+       {0x00016010, 0x6d820001},
+       {0x00016040, 0x7f80fff8},
+       {0x0001604c, 0x2699e04f},
+       {0x00016050, 0x6db6db6c},
+       {0x00016058, 0x6c200000},
+       {0x00016080, 0x000c0000},
+       {0x00016084, 0x9a68048c},
+       {0x00016088, 0x54214514},
+       {0x0001608c, 0x1203040b},
+       {0x00016090, 0x24926490},
+       {0x00016098, 0xd2888888},
+       {0x000160a0, 0x0a108ffe},
+       {0x000160a4, 0x812fc491},
+       {0x000160a8, 0x423c8000},
+       {0x000160b4, 0x92000000},
+       {0x000160b8, 0x0285dddc},
+       {0x000160bc, 0x02908888},
+       {0x000160c0, 0x00adb6d0},
+       {0x000160c4, 0x6db6db60},
+       {0x000160c8, 0x6db6db6c},
+       {0x000160cc, 0x0de6c1b0},
+       {0x00016100, 0x3fffbe04},
+       {0x00016104, 0xfff80000},
+       {0x00016108, 0x00200400},
+       {0x00016110, 0x00000000},
+       {0x00016144, 0x02084080},
+       {0x00016148, 0x000080c0},
+       {0x00016280, 0x050a0001},
+       {0x00016284, 0x3d841418},
+       {0x00016288, 0x00000000},
+       {0x0001628c, 0xe3000000},
+       {0x00016290, 0xa1005080},
+       {0x00016294, 0x00000020},
+       {0x00016298, 0x54a82900},
+       {0x00016340, 0x121e4276},
+       {0x00016344, 0x00300000},
+       {0x00016400, 0x36db6db6},
+       {0x00016404, 0x6db6db40},
+       {0x00016408, 0x73f00000},
+       {0x0001640c, 0x00000000},
+       {0x00016410, 0x6c800001},
+       {0x00016440, 0x7f80fff8},
+       {0x0001644c, 0x4699e04f},
+       {0x00016450, 0x6db6db6c},
+       {0x00016500, 0x3fffbe04},
+       {0x00016504, 0xfff80000},
+       {0x00016508, 0x00200400},
+       {0x00016510, 0x00000000},
+       {0x00016544, 0x02084080},
+       {0x00016548, 0x000080c0},
+};
+
+static const u32 ar9462_2p1_radio_postamble[][5] = {
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x0001609c, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524, 0x0b8ee524},
+       {0x000160b0, 0x01d67f70, 0x01d67f70, 0x01d67f70, 0x01d67f70},
+       {0x0001610c, 0x48000000, 0x40000000, 0x40000000, 0x40000000},
+       {0x0001650c, 0x48000000, 0x40000000, 0x40000000, 0x40000000},
+};
+
+static const u32 ar9462_2p1_soc_preamble[][2] = {
+       /* Addr      allmodes  */
+       {0x000040a4, 0x00a0c1c9},
+       {0x00007020, 0x00000000},
+       {0x00007034, 0x00000002},
+       {0x00007038, 0x000004c2},
+};
+
+static const u32 ar9462_2p1_soc_postamble[][5] = {
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x00007010, 0x00000033, 0x00000033, 0x00000033, 0x00000033},
+};
+
+static const u32 ar9462_2p1_radio_postamble_sys2ant[][5] = {
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x000160ac, 0xa4646c08, 0xa4646c08, 0x24645808, 0x24645808},
+       {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+       {0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+};
+
+static const u32 ar9462_2p1_common_rx_gain[][2] = {
+       /* Addr      allmodes  */
+       {0x0000a000, 0x00010000},
+       {0x0000a004, 0x00030002},
+       {0x0000a008, 0x00050004},
+       {0x0000a00c, 0x00810080},
+       {0x0000a010, 0x00830082},
+       {0x0000a014, 0x01810180},
+       {0x0000a018, 0x01830182},
+       {0x0000a01c, 0x01850184},
+       {0x0000a020, 0x01890188},
+       {0x0000a024, 0x018b018a},
+       {0x0000a028, 0x018d018c},
+       {0x0000a02c, 0x01910190},
+       {0x0000a030, 0x01930192},
+       {0x0000a034, 0x01950194},
+       {0x0000a038, 0x038a0196},
+       {0x0000a03c, 0x038c038b},
+       {0x0000a040, 0x0390038d},
+       {0x0000a044, 0x03920391},
+       {0x0000a048, 0x03940393},
+       {0x0000a04c, 0x03960395},
+       {0x0000a050, 0x00000000},
+       {0x0000a054, 0x00000000},
+       {0x0000a058, 0x00000000},
+       {0x0000a05c, 0x00000000},
+       {0x0000a060, 0x00000000},
+       {0x0000a064, 0x00000000},
+       {0x0000a068, 0x00000000},
+       {0x0000a06c, 0x00000000},
+       {0x0000a070, 0x00000000},
+       {0x0000a074, 0x00000000},
+       {0x0000a078, 0x00000000},
+       {0x0000a07c, 0x00000000},
+       {0x0000a080, 0x22222229},
+       {0x0000a084, 0x1d1d1d1d},
+       {0x0000a088, 0x1d1d1d1d},
+       {0x0000a08c, 0x1d1d1d1d},
+       {0x0000a090, 0x171d1d1d},
+       {0x0000a094, 0x11111717},
+       {0x0000a098, 0x00030311},
+       {0x0000a09c, 0x00000000},
+       {0x0000a0a0, 0x00000000},
+       {0x0000a0a4, 0x00000000},
+       {0x0000a0a8, 0x00000000},
+       {0x0000a0ac, 0x00000000},
+       {0x0000a0b0, 0x00000000},
+       {0x0000a0b4, 0x00000000},
+       {0x0000a0b8, 0x00000000},
+       {0x0000a0bc, 0x00000000},
+       {0x0000a0c0, 0x001f0000},
+       {0x0000a0c4, 0x01000101},
+       {0x0000a0c8, 0x011e011f},
+       {0x0000a0cc, 0x011c011d},
+       {0x0000a0d0, 0x02030204},
+       {0x0000a0d4, 0x02010202},
+       {0x0000a0d8, 0x021f0200},
+       {0x0000a0dc, 0x0302021e},
+       {0x0000a0e0, 0x03000301},
+       {0x0000a0e4, 0x031e031f},
+       {0x0000a0e8, 0x0402031d},
+       {0x0000a0ec, 0x04000401},
+       {0x0000a0f0, 0x041e041f},
+       {0x0000a0f4, 0x0502041d},
+       {0x0000a0f8, 0x05000501},
+       {0x0000a0fc, 0x051e051f},
+       {0x0000a100, 0x06010602},
+       {0x0000a104, 0x061f0600},
+       {0x0000a108, 0x061d061e},
+       {0x0000a10c, 0x07020703},
+       {0x0000a110, 0x07000701},
+       {0x0000a114, 0x00000000},
+       {0x0000a118, 0x00000000},
+       {0x0000a11c, 0x00000000},
+       {0x0000a120, 0x00000000},
+       {0x0000a124, 0x00000000},
+       {0x0000a128, 0x00000000},
+       {0x0000a12c, 0x00000000},
+       {0x0000a130, 0x00000000},
+       {0x0000a134, 0x00000000},
+       {0x0000a138, 0x00000000},
+       {0x0000a13c, 0x00000000},
+       {0x0000a140, 0x001f0000},
+       {0x0000a144, 0x01000101},
+       {0x0000a148, 0x011e011f},
+       {0x0000a14c, 0x011c011d},
+       {0x0000a150, 0x02030204},
+       {0x0000a154, 0x02010202},
+       {0x0000a158, 0x021f0200},
+       {0x0000a15c, 0x0302021e},
+       {0x0000a160, 0x03000301},
+       {0x0000a164, 0x031e031f},
+       {0x0000a168, 0x0402031d},
+       {0x0000a16c, 0x04000401},
+       {0x0000a170, 0x041e041f},
+       {0x0000a174, 0x0502041d},
+       {0x0000a178, 0x05000501},
+       {0x0000a17c, 0x051e051f},
+       {0x0000a180, 0x06010602},
+       {0x0000a184, 0x061f0600},
+       {0x0000a188, 0x061d061e},
+       {0x0000a18c, 0x07020703},
+       {0x0000a190, 0x07000701},
+       {0x0000a194, 0x00000000},
+       {0x0000a198, 0x00000000},
+       {0x0000a19c, 0x00000000},
+       {0x0000a1a0, 0x00000000},
+       {0x0000a1a4, 0x00000000},
+       {0x0000a1a8, 0x00000000},
+       {0x0000a1ac, 0x00000000},
+       {0x0000a1b0, 0x00000000},
+       {0x0000a1b4, 0x00000000},
+       {0x0000a1b8, 0x00000000},
+       {0x0000a1bc, 0x00000000},
+       {0x0000a1c0, 0x00000000},
+       {0x0000a1c4, 0x00000000},
+       {0x0000a1c8, 0x00000000},
+       {0x0000a1cc, 0x00000000},
+       {0x0000a1d0, 0x00000000},
+       {0x0000a1d4, 0x00000000},
+       {0x0000a1d8, 0x00000000},
+       {0x0000a1dc, 0x00000000},
+       {0x0000a1e0, 0x00000000},
+       {0x0000a1e4, 0x00000000},
+       {0x0000a1e8, 0x00000000},
+       {0x0000a1ec, 0x00000000},
+       {0x0000a1f0, 0x00000396},
+       {0x0000a1f4, 0x00000396},
+       {0x0000a1f8, 0x00000396},
+       {0x0000a1fc, 0x00000196},
+       {0x0000b000, 0x00010000},
+       {0x0000b004, 0x00030002},
+       {0x0000b008, 0x00050004},
+       {0x0000b00c, 0x00810080},
+       {0x0000b010, 0x00830082},
+       {0x0000b014, 0x01810180},
+       {0x0000b018, 0x01830182},
+       {0x0000b01c, 0x01850184},
+       {0x0000b020, 0x02810280},
+       {0x0000b024, 0x02830282},
+       {0x0000b028, 0x02850284},
+       {0x0000b02c, 0x02890288},
+       {0x0000b030, 0x028b028a},
+       {0x0000b034, 0x0388028c},
+       {0x0000b038, 0x038a0389},
+       {0x0000b03c, 0x038c038b},
+       {0x0000b040, 0x0390038d},
+       {0x0000b044, 0x03920391},
+       {0x0000b048, 0x03940393},
+       {0x0000b04c, 0x03960395},
+       {0x0000b050, 0x00000000},
+       {0x0000b054, 0x00000000},
+       {0x0000b058, 0x00000000},
+       {0x0000b05c, 0x00000000},
+       {0x0000b060, 0x00000000},
+       {0x0000b064, 0x00000000},
+       {0x0000b068, 0x00000000},
+       {0x0000b06c, 0x00000000},
+       {0x0000b070, 0x00000000},
+       {0x0000b074, 0x00000000},
+       {0x0000b078, 0x00000000},
+       {0x0000b07c, 0x00000000},
+       {0x0000b080, 0x2a2d2f32},
+       {0x0000b084, 0x21232328},
+       {0x0000b088, 0x19191c1e},
+       {0x0000b08c, 0x12141417},
+       {0x0000b090, 0x07070e0e},
+       {0x0000b094, 0x03030305},
+       {0x0000b098, 0x00000003},
+       {0x0000b09c, 0x00000000},
+       {0x0000b0a0, 0x00000000},
+       {0x0000b0a4, 0x00000000},
+       {0x0000b0a8, 0x00000000},
+       {0x0000b0ac, 0x00000000},
+       {0x0000b0b0, 0x00000000},
+       {0x0000b0b4, 0x00000000},
+       {0x0000b0b8, 0x00000000},
+       {0x0000b0bc, 0x00000000},
+       {0x0000b0c0, 0x003f0020},
+       {0x0000b0c4, 0x00400041},
+       {0x0000b0c8, 0x0140005f},
+       {0x0000b0cc, 0x0160015f},
+       {0x0000b0d0, 0x017e017f},
+       {0x0000b0d4, 0x02410242},
+       {0x0000b0d8, 0x025f0240},
+       {0x0000b0dc, 0x027f0260},
+       {0x0000b0e0, 0x0341027e},
+       {0x0000b0e4, 0x035f0340},
+       {0x0000b0e8, 0x037f0360},
+       {0x0000b0ec, 0x04400441},
+       {0x0000b0f0, 0x0460045f},
+       {0x0000b0f4, 0x0541047f},
+       {0x0000b0f8, 0x055f0540},
+       {0x0000b0fc, 0x057f0560},
+       {0x0000b100, 0x06400641},
+       {0x0000b104, 0x0660065f},
+       {0x0000b108, 0x067e067f},
+       {0x0000b10c, 0x07410742},
+       {0x0000b110, 0x075f0740},
+       {0x0000b114, 0x077f0760},
+       {0x0000b118, 0x07800781},
+       {0x0000b11c, 0x07a0079f},
+       {0x0000b120, 0x07c107bf},
+       {0x0000b124, 0x000007c0},
+       {0x0000b128, 0x00000000},
+       {0x0000b12c, 0x00000000},
+       {0x0000b130, 0x00000000},
+       {0x0000b134, 0x00000000},
+       {0x0000b138, 0x00000000},
+       {0x0000b13c, 0x00000000},
+       {0x0000b140, 0x003f0020},
+       {0x0000b144, 0x00400041},
+       {0x0000b148, 0x0140005f},
+       {0x0000b14c, 0x0160015f},
+       {0x0000b150, 0x017e017f},
+       {0x0000b154, 0x02410242},
+       {0x0000b158, 0x025f0240},
+       {0x0000b15c, 0x027f0260},
+       {0x0000b160, 0x0341027e},
+       {0x0000b164, 0x035f0340},
+       {0x0000b168, 0x037f0360},
+       {0x0000b16c, 0x04400441},
+       {0x0000b170, 0x0460045f},
+       {0x0000b174, 0x0541047f},
+       {0x0000b178, 0x055f0540},
+       {0x0000b17c, 0x057f0560},
+       {0x0000b180, 0x06400641},
+       {0x0000b184, 0x0660065f},
+       {0x0000b188, 0x067e067f},
+       {0x0000b18c, 0x07410742},
+       {0x0000b190, 0x075f0740},
+       {0x0000b194, 0x077f0760},
+       {0x0000b198, 0x07800781},
+       {0x0000b19c, 0x07a0079f},
+       {0x0000b1a0, 0x07c107bf},
+       {0x0000b1a4, 0x000007c0},
+       {0x0000b1a8, 0x00000000},
+       {0x0000b1ac, 0x00000000},
+       {0x0000b1b0, 0x00000000},
+       {0x0000b1b4, 0x00000000},
+       {0x0000b1b8, 0x00000000},
+       {0x0000b1bc, 0x00000000},
+       {0x0000b1c0, 0x00000000},
+       {0x0000b1c4, 0x00000000},
+       {0x0000b1c8, 0x00000000},
+       {0x0000b1cc, 0x00000000},
+       {0x0000b1d0, 0x00000000},
+       {0x0000b1d4, 0x00000000},
+       {0x0000b1d8, 0x00000000},
+       {0x0000b1dc, 0x00000000},
+       {0x0000b1e0, 0x00000000},
+       {0x0000b1e4, 0x00000000},
+       {0x0000b1e8, 0x00000000},
+       {0x0000b1ec, 0x00000000},
+       {0x0000b1f0, 0x00000396},
+       {0x0000b1f4, 0x00000396},
+       {0x0000b1f8, 0x00000396},
+       {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9462_2p1_common_mixed_rx_gain[][2] = {
+       /* Addr      allmodes  */
+       {0x0000a000, 0x00010000},
+       {0x0000a004, 0x00030002},
+       {0x0000a008, 0x00050004},
+       {0x0000a00c, 0x00810080},
+       {0x0000a010, 0x00830082},
+       {0x0000a014, 0x01810180},
+       {0x0000a018, 0x01830182},
+       {0x0000a01c, 0x01850184},
+       {0x0000a020, 0x01890188},
+       {0x0000a024, 0x018b018a},
+       {0x0000a028, 0x018d018c},
+       {0x0000a02c, 0x03820190},
+       {0x0000a030, 0x03840383},
+       {0x0000a034, 0x03880385},
+       {0x0000a038, 0x038a0389},
+       {0x0000a03c, 0x038c038b},
+       {0x0000a040, 0x0390038d},
+       {0x0000a044, 0x03920391},
+       {0x0000a048, 0x03940393},
+       {0x0000a04c, 0x03960395},
+       {0x0000a050, 0x00000000},
+       {0x0000a054, 0x00000000},
+       {0x0000a058, 0x00000000},
+       {0x0000a05c, 0x00000000},
+       {0x0000a060, 0x00000000},
+       {0x0000a064, 0x00000000},
+       {0x0000a068, 0x00000000},
+       {0x0000a06c, 0x00000000},
+       {0x0000a070, 0x00000000},
+       {0x0000a074, 0x00000000},
+       {0x0000a078, 0x00000000},
+       {0x0000a07c, 0x00000000},
+       {0x0000a080, 0x29292929},
+       {0x0000a084, 0x29292929},
+       {0x0000a088, 0x29292929},
+       {0x0000a08c, 0x29292929},
+       {0x0000a090, 0x22292929},
+       {0x0000a094, 0x1d1d2222},
+       {0x0000a098, 0x0c111117},
+       {0x0000a09c, 0x00030303},
+       {0x0000a0a0, 0x00000000},
+       {0x0000a0a4, 0x00000000},
+       {0x0000a0a8, 0x00000000},
+       {0x0000a0ac, 0x00000000},
+       {0x0000a0b0, 0x00000000},
+       {0x0000a0b4, 0x00000000},
+       {0x0000a0b8, 0x00000000},
+       {0x0000a0bc, 0x00000000},
+       {0x0000a0c0, 0x001f0000},
+       {0x0000a0c4, 0x01000101},
+       {0x0000a0c8, 0x011e011f},
+       {0x0000a0cc, 0x011c011d},
+       {0x0000a0d0, 0x02030204},
+       {0x0000a0d4, 0x02010202},
+       {0x0000a0d8, 0x021f0200},
+       {0x0000a0dc, 0x0302021e},
+       {0x0000a0e0, 0x03000301},
+       {0x0000a0e4, 0x031e031f},
+       {0x0000a0e8, 0x0402031d},
+       {0x0000a0ec, 0x04000401},
+       {0x0000a0f0, 0x041e041f},
+       {0x0000a0f4, 0x0502041d},
+       {0x0000a0f8, 0x05000501},
+       {0x0000a0fc, 0x051e051f},
+       {0x0000a100, 0x06010602},
+       {0x0000a104, 0x061f0600},
+       {0x0000a108, 0x061d061e},
+       {0x0000a10c, 0x07020703},
+       {0x0000a110, 0x07000701},
+       {0x0000a114, 0x00000000},
+       {0x0000a118, 0x00000000},
+       {0x0000a11c, 0x00000000},
+       {0x0000a120, 0x00000000},
+       {0x0000a124, 0x00000000},
+       {0x0000a128, 0x00000000},
+       {0x0000a12c, 0x00000000},
+       {0x0000a130, 0x00000000},
+       {0x0000a134, 0x00000000},
+       {0x0000a138, 0x00000000},
+       {0x0000a13c, 0x00000000},
+       {0x0000a140, 0x001f0000},
+       {0x0000a144, 0x01000101},
+       {0x0000a148, 0x011e011f},
+       {0x0000a14c, 0x011c011d},
+       {0x0000a150, 0x02030204},
+       {0x0000a154, 0x02010202},
+       {0x0000a158, 0x021f0200},
+       {0x0000a15c, 0x0302021e},
+       {0x0000a160, 0x03000301},
+       {0x0000a164, 0x031e031f},
+       {0x0000a168, 0x0402031d},
+       {0x0000a16c, 0x04000401},
+       {0x0000a170, 0x041e041f},
+       {0x0000a174, 0x0502041d},
+       {0x0000a178, 0x05000501},
+       {0x0000a17c, 0x051e051f},
+       {0x0000a180, 0x06010602},
+       {0x0000a184, 0x061f0600},
+       {0x0000a188, 0x061d061e},
+       {0x0000a18c, 0x07020703},
+       {0x0000a190, 0x07000701},
+       {0x0000a194, 0x00000000},
+       {0x0000a198, 0x00000000},
+       {0x0000a19c, 0x00000000},
+       {0x0000a1a0, 0x00000000},
+       {0x0000a1a4, 0x00000000},
+       {0x0000a1a8, 0x00000000},
+       {0x0000a1ac, 0x00000000},
+       {0x0000a1b0, 0x00000000},
+       {0x0000a1b4, 0x00000000},
+       {0x0000a1b8, 0x00000000},
+       {0x0000a1bc, 0x00000000},
+       {0x0000a1c0, 0x00000000},
+       {0x0000a1c4, 0x00000000},
+       {0x0000a1c8, 0x00000000},
+       {0x0000a1cc, 0x00000000},
+       {0x0000a1d0, 0x00000000},
+       {0x0000a1d4, 0x00000000},
+       {0x0000a1d8, 0x00000000},
+       {0x0000a1dc, 0x00000000},
+       {0x0000a1e0, 0x00000000},
+       {0x0000a1e4, 0x00000000},
+       {0x0000a1e8, 0x00000000},
+       {0x0000a1ec, 0x00000000},
+       {0x0000a1f0, 0x00000396},
+       {0x0000a1f4, 0x00000396},
+       {0x0000a1f8, 0x00000396},
+       {0x0000a1fc, 0x00000196},
+       {0x0000b000, 0x00010000},
+       {0x0000b004, 0x00030002},
+       {0x0000b008, 0x00050004},
+       {0x0000b00c, 0x00810080},
+       {0x0000b010, 0x00830082},
+       {0x0000b014, 0x01810180},
+       {0x0000b018, 0x01830182},
+       {0x0000b01c, 0x01850184},
+       {0x0000b020, 0x02810280},
+       {0x0000b024, 0x02830282},
+       {0x0000b028, 0x02850284},
+       {0x0000b02c, 0x02890288},
+       {0x0000b030, 0x028b028a},
+       {0x0000b034, 0x0388028c},
+       {0x0000b038, 0x038a0389},
+       {0x0000b03c, 0x038c038b},
+       {0x0000b040, 0x0390038d},
+       {0x0000b044, 0x03920391},
+       {0x0000b048, 0x03940393},
+       {0x0000b04c, 0x03960395},
+       {0x0000b050, 0x00000000},
+       {0x0000b054, 0x00000000},
+       {0x0000b058, 0x00000000},
+       {0x0000b05c, 0x00000000},
+       {0x0000b060, 0x00000000},
+       {0x0000b064, 0x00000000},
+       {0x0000b068, 0x00000000},
+       {0x0000b06c, 0x00000000},
+       {0x0000b070, 0x00000000},
+       {0x0000b074, 0x00000000},
+       {0x0000b078, 0x00000000},
+       {0x0000b07c, 0x00000000},
+       {0x0000b080, 0x2a2d2f32},
+       {0x0000b084, 0x21232328},
+       {0x0000b088, 0x19191c1e},
+       {0x0000b08c, 0x12141417},
+       {0x0000b090, 0x07070e0e},
+       {0x0000b094, 0x03030305},
+       {0x0000b098, 0x00000003},
+       {0x0000b09c, 0x00000000},
+       {0x0000b0a0, 0x00000000},
+       {0x0000b0a4, 0x00000000},
+       {0x0000b0a8, 0x00000000},
+       {0x0000b0ac, 0x00000000},
+       {0x0000b0b0, 0x00000000},
+       {0x0000b0b4, 0x00000000},
+       {0x0000b0b8, 0x00000000},
+       {0x0000b0bc, 0x00000000},
+       {0x0000b0c0, 0x003f0020},
+       {0x0000b0c4, 0x00400041},
+       {0x0000b0c8, 0x0140005f},
+       {0x0000b0cc, 0x0160015f},
+       {0x0000b0d0, 0x017e017f},
+       {0x0000b0d4, 0x02410242},
+       {0x0000b0d8, 0x025f0240},
+       {0x0000b0dc, 0x027f0260},
+       {0x0000b0e0, 0x0341027e},
+       {0x0000b0e4, 0x035f0340},
+       {0x0000b0e8, 0x037f0360},
+       {0x0000b0ec, 0x04400441},
+       {0x0000b0f0, 0x0460045f},
+       {0x0000b0f4, 0x0541047f},
+       {0x0000b0f8, 0x055f0540},
+       {0x0000b0fc, 0x057f0560},
+       {0x0000b100, 0x06400641},
+       {0x0000b104, 0x0660065f},
+       {0x0000b108, 0x067e067f},
+       {0x0000b10c, 0x07410742},
+       {0x0000b110, 0x075f0740},
+       {0x0000b114, 0x077f0760},
+       {0x0000b118, 0x07800781},
+       {0x0000b11c, 0x07a0079f},
+       {0x0000b120, 0x07c107bf},
+       {0x0000b124, 0x000007c0},
+       {0x0000b128, 0x00000000},
+       {0x0000b12c, 0x00000000},
+       {0x0000b130, 0x00000000},
+       {0x0000b134, 0x00000000},
+       {0x0000b138, 0x00000000},
+       {0x0000b13c, 0x00000000},
+       {0x0000b140, 0x003f0020},
+       {0x0000b144, 0x00400041},
+       {0x0000b148, 0x0140005f},
+       {0x0000b14c, 0x0160015f},
+       {0x0000b150, 0x017e017f},
+       {0x0000b154, 0x02410242},
+       {0x0000b158, 0x025f0240},
+       {0x0000b15c, 0x027f0260},
+       {0x0000b160, 0x0341027e},
+       {0x0000b164, 0x035f0340},
+       {0x0000b168, 0x037f0360},
+       {0x0000b16c, 0x04400441},
+       {0x0000b170, 0x0460045f},
+       {0x0000b174, 0x0541047f},
+       {0x0000b178, 0x055f0540},
+       {0x0000b17c, 0x057f0560},
+       {0x0000b180, 0x06400641},
+       {0x0000b184, 0x0660065f},
+       {0x0000b188, 0x067e067f},
+       {0x0000b18c, 0x07410742},
+       {0x0000b190, 0x075f0740},
+       {0x0000b194, 0x077f0760},
+       {0x0000b198, 0x07800781},
+       {0x0000b19c, 0x07a0079f},
+       {0x0000b1a0, 0x07c107bf},
+       {0x0000b1a4, 0x000007c0},
+       {0x0000b1a8, 0x00000000},
+       {0x0000b1ac, 0x00000000},
+       {0x0000b1b0, 0x00000000},
+       {0x0000b1b4, 0x00000000},
+       {0x0000b1b8, 0x00000000},
+       {0x0000b1bc, 0x00000000},
+       {0x0000b1c0, 0x00000000},
+       {0x0000b1c4, 0x00000000},
+       {0x0000b1c8, 0x00000000},
+       {0x0000b1cc, 0x00000000},
+       {0x0000b1d0, 0x00000000},
+       {0x0000b1d4, 0x00000000},
+       {0x0000b1d8, 0x00000000},
+       {0x0000b1dc, 0x00000000},
+       {0x0000b1e0, 0x00000000},
+       {0x0000b1e4, 0x00000000},
+       {0x0000b1e8, 0x00000000},
+       {0x0000b1ec, 0x00000000},
+       {0x0000b1f0, 0x00000396},
+       {0x0000b1f4, 0x00000396},
+       {0x0000b1f8, 0x00000396},
+       {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9462_2p1_baseband_core_mix_rxgain[][2] = {
+       /* Addr      allmodes  */
+       {0x00009fd0, 0x0a2d6b93},
+};
+
+static const u32 ar9462_2p1_baseband_postamble_mix_rxgain[][5] = {
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x00009820, 0x206a022e, 0x206a022e, 0x206a01ae, 0x206a01ae},
+       {0x00009824, 0x63c640de, 0x5ac640d0, 0x63c640da, 0x63c640da},
+       {0x00009828, 0x0796be89, 0x0696b081, 0x0916be81, 0x0916be81},
+       {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000d8, 0x6c4000d8},
+       {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec86d2e, 0x7ec86d2e},
+       {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32395c5e},
+};
+
+static const u32 ar9462_2p1_baseband_postamble_5g_xlna[][5] = {
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
+};
+
+static const u32 ar9462_2p1_common_wo_xlna_rx_gain[][2] = {
+       /* Addr      allmodes  */
+       {0x0000a000, 0x00010000},
+       {0x0000a004, 0x00030002},
+       {0x0000a008, 0x00050004},
+       {0x0000a00c, 0x00810080},
+       {0x0000a010, 0x00830082},
+       {0x0000a014, 0x01810180},
+       {0x0000a018, 0x01830182},
+       {0x0000a01c, 0x01850184},
+       {0x0000a020, 0x01890188},
+       {0x0000a024, 0x018b018a},
+       {0x0000a028, 0x018d018c},
+       {0x0000a02c, 0x03820190},
+       {0x0000a030, 0x03840383},
+       {0x0000a034, 0x03880385},
+       {0x0000a038, 0x038a0389},
+       {0x0000a03c, 0x038c038b},
+       {0x0000a040, 0x0390038d},
+       {0x0000a044, 0x03920391},
+       {0x0000a048, 0x03940393},
+       {0x0000a04c, 0x03960395},
+       {0x0000a050, 0x00000000},
+       {0x0000a054, 0x00000000},
+       {0x0000a058, 0x00000000},
+       {0x0000a05c, 0x00000000},
+       {0x0000a060, 0x00000000},
+       {0x0000a064, 0x00000000},
+       {0x0000a068, 0x00000000},
+       {0x0000a06c, 0x00000000},
+       {0x0000a070, 0x00000000},
+       {0x0000a074, 0x00000000},
+       {0x0000a078, 0x00000000},
+       {0x0000a07c, 0x00000000},
+       {0x0000a080, 0x29292929},
+       {0x0000a084, 0x29292929},
+       {0x0000a088, 0x29292929},
+       {0x0000a08c, 0x29292929},
+       {0x0000a090, 0x22292929},
+       {0x0000a094, 0x1d1d2222},
+       {0x0000a098, 0x0c111117},
+       {0x0000a09c, 0x00030303},
+       {0x0000a0a0, 0x00000000},
+       {0x0000a0a4, 0x00000000},
+       {0x0000a0a8, 0x00000000},
+       {0x0000a0ac, 0x00000000},
+       {0x0000a0b0, 0x00000000},
+       {0x0000a0b4, 0x00000000},
+       {0x0000a0b8, 0x00000000},
+       {0x0000a0bc, 0x00000000},
+       {0x0000a0c0, 0x001f0000},
+       {0x0000a0c4, 0x01000101},
+       {0x0000a0c8, 0x011e011f},
+       {0x0000a0cc, 0x011c011d},
+       {0x0000a0d0, 0x02030204},
+       {0x0000a0d4, 0x02010202},
+       {0x0000a0d8, 0x021f0200},
+       {0x0000a0dc, 0x0302021e},
+       {0x0000a0e0, 0x03000301},
+       {0x0000a0e4, 0x031e031f},
+       {0x0000a0e8, 0x0402031d},
+       {0x0000a0ec, 0x04000401},
+       {0x0000a0f0, 0x041e041f},
+       {0x0000a0f4, 0x0502041d},
+       {0x0000a0f8, 0x05000501},
+       {0x0000a0fc, 0x051e051f},
+       {0x0000a100, 0x06010602},
+       {0x0000a104, 0x061f0600},
+       {0x0000a108, 0x061d061e},
+       {0x0000a10c, 0x07020703},
+       {0x0000a110, 0x07000701},
+       {0x0000a114, 0x00000000},
+       {0x0000a118, 0x00000000},
+       {0x0000a11c, 0x00000000},
+       {0x0000a120, 0x00000000},
+       {0x0000a124, 0x00000000},
+       {0x0000a128, 0x00000000},
+       {0x0000a12c, 0x00000000},
+       {0x0000a130, 0x00000000},
+       {0x0000a134, 0x00000000},
+       {0x0000a138, 0x00000000},
+       {0x0000a13c, 0x00000000},
+       {0x0000a140, 0x001f0000},
+       {0x0000a144, 0x01000101},
+       {0x0000a148, 0x011e011f},
+       {0x0000a14c, 0x011c011d},
+       {0x0000a150, 0x02030204},
+       {0x0000a154, 0x02010202},
+       {0x0000a158, 0x021f0200},
+       {0x0000a15c, 0x0302021e},
+       {0x0000a160, 0x03000301},
+       {0x0000a164, 0x031e031f},
+       {0x0000a168, 0x0402031d},
+       {0x0000a16c, 0x04000401},
+       {0x0000a170, 0x041e041f},
+       {0x0000a174, 0x0502041d},
+       {0x0000a178, 0x05000501},
+       {0x0000a17c, 0x051e051f},
+       {0x0000a180, 0x06010602},
+       {0x0000a184, 0x061f0600},
+       {0x0000a188, 0x061d061e},
+       {0x0000a18c, 0x07020703},
+       {0x0000a190, 0x07000701},
+       {0x0000a194, 0x00000000},
+       {0x0000a198, 0x00000000},
+       {0x0000a19c, 0x00000000},
+       {0x0000a1a0, 0x00000000},
+       {0x0000a1a4, 0x00000000},
+       {0x0000a1a8, 0x00000000},
+       {0x0000a1ac, 0x00000000},
+       {0x0000a1b0, 0x00000000},
+       {0x0000a1b4, 0x00000000},
+       {0x0000a1b8, 0x00000000},
+       {0x0000a1bc, 0x00000000},
+       {0x0000a1c0, 0x00000000},
+       {0x0000a1c4, 0x00000000},
+       {0x0000a1c8, 0x00000000},
+       {0x0000a1cc, 0x00000000},
+       {0x0000a1d0, 0x00000000},
+       {0x0000a1d4, 0x00000000},
+       {0x0000a1d8, 0x00000000},
+       {0x0000a1dc, 0x00000000},
+       {0x0000a1e0, 0x00000000},
+       {0x0000a1e4, 0x00000000},
+       {0x0000a1e8, 0x00000000},
+       {0x0000a1ec, 0x00000000},
+       {0x0000a1f0, 0x00000396},
+       {0x0000a1f4, 0x00000396},
+       {0x0000a1f8, 0x00000396},
+       {0x0000a1fc, 0x00000196},
+       {0x0000b000, 0x00010000},
+       {0x0000b004, 0x00030002},
+       {0x0000b008, 0x00050004},
+       {0x0000b00c, 0x00810080},
+       {0x0000b010, 0x00830082},
+       {0x0000b014, 0x01810180},
+       {0x0000b018, 0x01830182},
+       {0x0000b01c, 0x01850184},
+       {0x0000b020, 0x02810280},
+       {0x0000b024, 0x02830282},
+       {0x0000b028, 0x02850284},
+       {0x0000b02c, 0x02890288},
+       {0x0000b030, 0x028b028a},
+       {0x0000b034, 0x0388028c},
+       {0x0000b038, 0x038a0389},
+       {0x0000b03c, 0x038c038b},
+       {0x0000b040, 0x0390038d},
+       {0x0000b044, 0x03920391},
+       {0x0000b048, 0x03940393},
+       {0x0000b04c, 0x03960395},
+       {0x0000b050, 0x00000000},
+       {0x0000b054, 0x00000000},
+       {0x0000b058, 0x00000000},
+       {0x0000b05c, 0x00000000},
+       {0x0000b060, 0x00000000},
+       {0x0000b064, 0x00000000},
+       {0x0000b068, 0x00000000},
+       {0x0000b06c, 0x00000000},
+       {0x0000b070, 0x00000000},
+       {0x0000b074, 0x00000000},
+       {0x0000b078, 0x00000000},
+       {0x0000b07c, 0x00000000},
+       {0x0000b080, 0x32323232},
+       {0x0000b084, 0x2f2f3232},
+       {0x0000b088, 0x23282a2d},
+       {0x0000b08c, 0x1c1e2123},
+       {0x0000b090, 0x14171919},
+       {0x0000b094, 0x0e0e1214},
+       {0x0000b098, 0x03050707},
+       {0x0000b09c, 0x00030303},
+       {0x0000b0a0, 0x00000000},
+       {0x0000b0a4, 0x00000000},
+       {0x0000b0a8, 0x00000000},
+       {0x0000b0ac, 0x00000000},
+       {0x0000b0b0, 0x00000000},
+       {0x0000b0b4, 0x00000000},
+       {0x0000b0b8, 0x00000000},
+       {0x0000b0bc, 0x00000000},
+       {0x0000b0c0, 0x003f0020},
+       {0x0000b0c4, 0x00400041},
+       {0x0000b0c8, 0x0140005f},
+       {0x0000b0cc, 0x0160015f},
+       {0x0000b0d0, 0x017e017f},
+       {0x0000b0d4, 0x02410242},
+       {0x0000b0d8, 0x025f0240},
+       {0x0000b0dc, 0x027f0260},
+       {0x0000b0e0, 0x0341027e},
+       {0x0000b0e4, 0x035f0340},
+       {0x0000b0e8, 0x037f0360},
+       {0x0000b0ec, 0x04400441},
+       {0x0000b0f0, 0x0460045f},
+       {0x0000b0f4, 0x0541047f},
+       {0x0000b0f8, 0x055f0540},
+       {0x0000b0fc, 0x057f0560},
+       {0x0000b100, 0x06400641},
+       {0x0000b104, 0x0660065f},
+       {0x0000b108, 0x067e067f},
+       {0x0000b10c, 0x07410742},
+       {0x0000b110, 0x075f0740},
+       {0x0000b114, 0x077f0760},
+       {0x0000b118, 0x07800781},
+       {0x0000b11c, 0x07a0079f},
+       {0x0000b120, 0x07c107bf},
+       {0x0000b124, 0x000007c0},
+       {0x0000b128, 0x00000000},
+       {0x0000b12c, 0x00000000},
+       {0x0000b130, 0x00000000},
+       {0x0000b134, 0x00000000},
+       {0x0000b138, 0x00000000},
+       {0x0000b13c, 0x00000000},
+       {0x0000b140, 0x003f0020},
+       {0x0000b144, 0x00400041},
+       {0x0000b148, 0x0140005f},
+       {0x0000b14c, 0x0160015f},
+       {0x0000b150, 0x017e017f},
+       {0x0000b154, 0x02410242},
+       {0x0000b158, 0x025f0240},
+       {0x0000b15c, 0x027f0260},
+       {0x0000b160, 0x0341027e},
+       {0x0000b164, 0x035f0340},
+       {0x0000b168, 0x037f0360},
+       {0x0000b16c, 0x04400441},
+       {0x0000b170, 0x0460045f},
+       {0x0000b174, 0x0541047f},
+       {0x0000b178, 0x055f0540},
+       {0x0000b17c, 0x057f0560},
+       {0x0000b180, 0x06400641},
+       {0x0000b184, 0x0660065f},
+       {0x0000b188, 0x067e067f},
+       {0x0000b18c, 0x07410742},
+       {0x0000b190, 0x075f0740},
+       {0x0000b194, 0x077f0760},
+       {0x0000b198, 0x07800781},
+       {0x0000b19c, 0x07a0079f},
+       {0x0000b1a0, 0x07c107bf},
+       {0x0000b1a4, 0x000007c0},
+       {0x0000b1a8, 0x00000000},
+       {0x0000b1ac, 0x00000000},
+       {0x0000b1b0, 0x00000000},
+       {0x0000b1b4, 0x00000000},
+       {0x0000b1b8, 0x00000000},
+       {0x0000b1bc, 0x00000000},
+       {0x0000b1c0, 0x00000000},
+       {0x0000b1c4, 0x00000000},
+       {0x0000b1c8, 0x00000000},
+       {0x0000b1cc, 0x00000000},
+       {0x0000b1d0, 0x00000000},
+       {0x0000b1d4, 0x00000000},
+       {0x0000b1d8, 0x00000000},
+       {0x0000b1dc, 0x00000000},
+       {0x0000b1e0, 0x00000000},
+       {0x0000b1e4, 0x00000000},
+       {0x0000b1e8, 0x00000000},
+       {0x0000b1ec, 0x00000000},
+       {0x0000b1f0, 0x00000396},
+       {0x0000b1f4, 0x00000396},
+       {0x0000b1f8, 0x00000396},
+       {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9462_2p1_common_5g_xlna_only_rx_gain[][2] = {
+       /* Addr      allmodes  */
+       {0x0000a000, 0x00010000},
+       {0x0000a004, 0x00030002},
+       {0x0000a008, 0x00050004},
+       {0x0000a00c, 0x00810080},
+       {0x0000a010, 0x00830082},
+       {0x0000a014, 0x01810180},
+       {0x0000a018, 0x01830182},
+       {0x0000a01c, 0x01850184},
+       {0x0000a020, 0x01890188},
+       {0x0000a024, 0x018b018a},
+       {0x0000a028, 0x018d018c},
+       {0x0000a02c, 0x03820190},
+       {0x0000a030, 0x03840383},
+       {0x0000a034, 0x03880385},
+       {0x0000a038, 0x038a0389},
+       {0x0000a03c, 0x038c038b},
+       {0x0000a040, 0x0390038d},
+       {0x0000a044, 0x03920391},
+       {0x0000a048, 0x03940393},
+       {0x0000a04c, 0x03960395},
+       {0x0000a050, 0x00000000},
+       {0x0000a054, 0x00000000},
+       {0x0000a058, 0x00000000},
+       {0x0000a05c, 0x00000000},
+       {0x0000a060, 0x00000000},
+       {0x0000a064, 0x00000000},
+       {0x0000a068, 0x00000000},
+       {0x0000a06c, 0x00000000},
+       {0x0000a070, 0x00000000},
+       {0x0000a074, 0x00000000},
+       {0x0000a078, 0x00000000},
+       {0x0000a07c, 0x00000000},
+       {0x0000a080, 0x29292929},
+       {0x0000a084, 0x29292929},
+       {0x0000a088, 0x29292929},
+       {0x0000a08c, 0x29292929},
+       {0x0000a090, 0x22292929},
+       {0x0000a094, 0x1d1d2222},
+       {0x0000a098, 0x0c111117},
+       {0x0000a09c, 0x00030303},
+       {0x0000a0a0, 0x00000000},
+       {0x0000a0a4, 0x00000000},
+       {0x0000a0a8, 0x00000000},
+       {0x0000a0ac, 0x00000000},
+       {0x0000a0b0, 0x00000000},
+       {0x0000a0b4, 0x00000000},
+       {0x0000a0b8, 0x00000000},
+       {0x0000a0bc, 0x00000000},
+       {0x0000a0c0, 0x001f0000},
+       {0x0000a0c4, 0x01000101},
+       {0x0000a0c8, 0x011e011f},
+       {0x0000a0cc, 0x011c011d},
+       {0x0000a0d0, 0x02030204},
+       {0x0000a0d4, 0x02010202},
+       {0x0000a0d8, 0x021f0200},
+       {0x0000a0dc, 0x0302021e},
+       {0x0000a0e0, 0x03000301},
+       {0x0000a0e4, 0x031e031f},
+       {0x0000a0e8, 0x0402031d},
+       {0x0000a0ec, 0x04000401},
+       {0x0000a0f0, 0x041e041f},
+       {0x0000a0f4, 0x0502041d},
+       {0x0000a0f8, 0x05000501},
+       {0x0000a0fc, 0x051e051f},
+       {0x0000a100, 0x06010602},
+       {0x0000a104, 0x061f0600},
+       {0x0000a108, 0x061d061e},
+       {0x0000a10c, 0x07020703},
+       {0x0000a110, 0x07000701},
+       {0x0000a114, 0x00000000},
+       {0x0000a118, 0x00000000},
+       {0x0000a11c, 0x00000000},
+       {0x0000a120, 0x00000000},
+       {0x0000a124, 0x00000000},
+       {0x0000a128, 0x00000000},
+       {0x0000a12c, 0x00000000},
+       {0x0000a130, 0x00000000},
+       {0x0000a134, 0x00000000},
+       {0x0000a138, 0x00000000},
+       {0x0000a13c, 0x00000000},
+       {0x0000a140, 0x001f0000},
+       {0x0000a144, 0x01000101},
+       {0x0000a148, 0x011e011f},
+       {0x0000a14c, 0x011c011d},
+       {0x0000a150, 0x02030204},
+       {0x0000a154, 0x02010202},
+       {0x0000a158, 0x021f0200},
+       {0x0000a15c, 0x0302021e},
+       {0x0000a160, 0x03000301},
+       {0x0000a164, 0x031e031f},
+       {0x0000a168, 0x0402031d},
+       {0x0000a16c, 0x04000401},
+       {0x0000a170, 0x041e041f},
+       {0x0000a174, 0x0502041d},
+       {0x0000a178, 0x05000501},
+       {0x0000a17c, 0x051e051f},
+       {0x0000a180, 0x06010602},
+       {0x0000a184, 0x061f0600},
+       {0x0000a188, 0x061d061e},
+       {0x0000a18c, 0x07020703},
+       {0x0000a190, 0x07000701},
+       {0x0000a194, 0x00000000},
+       {0x0000a198, 0x00000000},
+       {0x0000a19c, 0x00000000},
+       {0x0000a1a0, 0x00000000},
+       {0x0000a1a4, 0x00000000},
+       {0x0000a1a8, 0x00000000},
+       {0x0000a1ac, 0x00000000},
+       {0x0000a1b0, 0x00000000},
+       {0x0000a1b4, 0x00000000},
+       {0x0000a1b8, 0x00000000},
+       {0x0000a1bc, 0x00000000},
+       {0x0000a1c0, 0x00000000},
+       {0x0000a1c4, 0x00000000},
+       {0x0000a1c8, 0x00000000},
+       {0x0000a1cc, 0x00000000},
+       {0x0000a1d0, 0x00000000},
+       {0x0000a1d4, 0x00000000},
+       {0x0000a1d8, 0x00000000},
+       {0x0000a1dc, 0x00000000},
+       {0x0000a1e0, 0x00000000},
+       {0x0000a1e4, 0x00000000},
+       {0x0000a1e8, 0x00000000},
+       {0x0000a1ec, 0x00000000},
+       {0x0000a1f0, 0x00000396},
+       {0x0000a1f4, 0x00000396},
+       {0x0000a1f8, 0x00000396},
+       {0x0000a1fc, 0x00000196},
+       {0x0000b000, 0x00010000},
+       {0x0000b004, 0x00030002},
+       {0x0000b008, 0x00050004},
+       {0x0000b00c, 0x00810080},
+       {0x0000b010, 0x00830082},
+       {0x0000b014, 0x01810180},
+       {0x0000b018, 0x01830182},
+       {0x0000b01c, 0x01850184},
+       {0x0000b020, 0x02810280},
+       {0x0000b024, 0x02830282},
+       {0x0000b028, 0x02850284},
+       {0x0000b02c, 0x02890288},
+       {0x0000b030, 0x028b028a},
+       {0x0000b034, 0x0388028c},
+       {0x0000b038, 0x038a0389},
+       {0x0000b03c, 0x038c038b},
+       {0x0000b040, 0x0390038d},
+       {0x0000b044, 0x03920391},
+       {0x0000b048, 0x03940393},
+       {0x0000b04c, 0x03960395},
+       {0x0000b050, 0x00000000},
+       {0x0000b054, 0x00000000},
+       {0x0000b058, 0x00000000},
+       {0x0000b05c, 0x00000000},
+       {0x0000b060, 0x00000000},
+       {0x0000b064, 0x00000000},
+       {0x0000b068, 0x00000000},
+       {0x0000b06c, 0x00000000},
+       {0x0000b070, 0x00000000},
+       {0x0000b074, 0x00000000},
+       {0x0000b078, 0x00000000},
+       {0x0000b07c, 0x00000000},
+       {0x0000b080, 0x2a2d2f32},
+       {0x0000b084, 0x21232328},
+       {0x0000b088, 0x19191c1e},
+       {0x0000b08c, 0x12141417},
+       {0x0000b090, 0x07070e0e},
+       {0x0000b094, 0x03030305},
+       {0x0000b098, 0x00000003},
+       {0x0000b09c, 0x00000000},
+       {0x0000b0a0, 0x00000000},
+       {0x0000b0a4, 0x00000000},
+       {0x0000b0a8, 0x00000000},
+       {0x0000b0ac, 0x00000000},
+       {0x0000b0b0, 0x00000000},
+       {0x0000b0b4, 0x00000000},
+       {0x0000b0b8, 0x00000000},
+       {0x0000b0bc, 0x00000000},
+       {0x0000b0c0, 0x003f0020},
+       {0x0000b0c4, 0x00400041},
+       {0x0000b0c8, 0x0140005f},
+       {0x0000b0cc, 0x0160015f},
+       {0x0000b0d0, 0x017e017f},
+       {0x0000b0d4, 0x02410242},
+       {0x0000b0d8, 0x025f0240},
+       {0x0000b0dc, 0x027f0260},
+       {0x0000b0e0, 0x0341027e},
+       {0x0000b0e4, 0x035f0340},
+       {0x0000b0e8, 0x037f0360},
+       {0x0000b0ec, 0x04400441},
+       {0x0000b0f0, 0x0460045f},
+       {0x0000b0f4, 0x0541047f},
+       {0x0000b0f8, 0x055f0540},
+       {0x0000b0fc, 0x057f0560},
+       {0x0000b100, 0x06400641},
+       {0x0000b104, 0x0660065f},
+       {0x0000b108, 0x067e067f},
+       {0x0000b10c, 0x07410742},
+       {0x0000b110, 0x075f0740},
+       {0x0000b114, 0x077f0760},
+       {0x0000b118, 0x07800781},
+       {0x0000b11c, 0x07a0079f},
+       {0x0000b120, 0x07c107bf},
+       {0x0000b124, 0x000007c0},
+       {0x0000b128, 0x00000000},
+       {0x0000b12c, 0x00000000},
+       {0x0000b130, 0x00000000},
+       {0x0000b134, 0x00000000},
+       {0x0000b138, 0x00000000},
+       {0x0000b13c, 0x00000000},
+       {0x0000b140, 0x003f0020},
+       {0x0000b144, 0x00400041},
+       {0x0000b148, 0x0140005f},
+       {0x0000b14c, 0x0160015f},
+       {0x0000b150, 0x017e017f},
+       {0x0000b154, 0x02410242},
+       {0x0000b158, 0x025f0240},
+       {0x0000b15c, 0x027f0260},
+       {0x0000b160, 0x0341027e},
+       {0x0000b164, 0x035f0340},
+       {0x0000b168, 0x037f0360},
+       {0x0000b16c, 0x04400441},
+       {0x0000b170, 0x0460045f},
+       {0x0000b174, 0x0541047f},
+       {0x0000b178, 0x055f0540},
+       {0x0000b17c, 0x057f0560},
+       {0x0000b180, 0x06400641},
+       {0x0000b184, 0x0660065f},
+       {0x0000b188, 0x067e067f},
+       {0x0000b18c, 0x07410742},
+       {0x0000b190, 0x075f0740},
+       {0x0000b194, 0x077f0760},
+       {0x0000b198, 0x07800781},
+       {0x0000b19c, 0x07a0079f},
+       {0x0000b1a0, 0x07c107bf},
+       {0x0000b1a4, 0x000007c0},
+       {0x0000b1a8, 0x00000000},
+       {0x0000b1ac, 0x00000000},
+       {0x0000b1b0, 0x00000000},
+       {0x0000b1b4, 0x00000000},
+       {0x0000b1b8, 0x00000000},
+       {0x0000b1bc, 0x00000000},
+       {0x0000b1c0, 0x00000000},
+       {0x0000b1c4, 0x00000000},
+       {0x0000b1c8, 0x00000000},
+       {0x0000b1cc, 0x00000000},
+       {0x0000b1d0, 0x00000000},
+       {0x0000b1d4, 0x00000000},
+       {0x0000b1d8, 0x00000000},
+       {0x0000b1dc, 0x00000000},
+       {0x0000b1e0, 0x00000000},
+       {0x0000b1e4, 0x00000000},
+       {0x0000b1e8, 0x00000000},
+       {0x0000b1ec, 0x00000000},
+       {0x0000b1f0, 0x00000396},
+       {0x0000b1f4, 0x00000396},
+       {0x0000b1f8, 0x00000396},
+       {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar9462_2p1_modes_low_ob_db_tx_gain[][5] = {
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+       {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+       {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+       {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+       {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+       {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+       {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
+       {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
+       {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
+       {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
+       {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
+       {0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
+       {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
+       {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
+       {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
+       {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
+       {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
+       {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
+       {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
+       {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
+       {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
+       {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
+       {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
+       {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
+       {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
+       {0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
+       {0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
+       {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
+       {0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
+       {0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
+       {0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+       {0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+       {0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+       {0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+       {0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+       {0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+       {0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+       {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+       {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+       {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
+       {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
+       {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+       {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
+       {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
+       {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+       {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+       {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+       {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+       {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+       {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+       {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+       {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+       {0x00016044, 0x012482d4, 0x012482d4, 0x012482d4, 0x012482d4},
+       {0x00016048, 0x64992060, 0x64992060, 0x64992060, 0x64992060},
+       {0x00016054, 0x6db60000, 0x6db60000, 0x6db60000, 0x6db60000},
+       {0x00016444, 0x012482d4, 0x012482d4, 0x012482d4, 0x012482d4},
+       {0x00016448, 0x64992000, 0x64992000, 0x64992000, 0x64992000},
+       {0x00016454, 0x6db60000, 0x6db60000, 0x6db60000, 0x6db60000},
+};
+
+static const u32 ar9462_2p1_modes_high_ob_db_tx_gain[][5] = {
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+       {0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
+       {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
+       {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
+       {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+       {0x0000a410, 0x000050da, 0x000050da, 0x000050de, 0x000050de},
+       {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+       {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
+       {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
+       {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
+       {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
+       {0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
+       {0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
+       {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
+       {0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
+       {0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
+       {0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
+       {0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
+       {0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20},
+       {0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22},
+       {0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24},
+       {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
+       {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
+       {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
+       {0x0000a548, 0x55025eb3, 0x55025eb3, 0x3e001a81, 0x3e001a81},
+       {0x0000a54c, 0x58025ef3, 0x58025ef3, 0x42001a83, 0x42001a83},
+       {0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x44001a84, 0x44001a84},
+       {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
+       {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
+       {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
+       {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
+       {0x0000a564, 0x751ffff6, 0x751ffff6, 0x56001eec, 0x56001eec},
+       {0x0000a568, 0x751ffff6, 0x751ffff6, 0x58001ef0, 0x58001ef0},
+       {0x0000a56c, 0x751ffff6, 0x751ffff6, 0x5a001ef4, 0x5a001ef4},
+       {0x0000a570, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
+       {0x0000a574, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
+       {0x0000a578, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
+       {0x0000a57c, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
+       {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
+       {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
+       {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
+       {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
+       {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
+       {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
+       {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
+       {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+       {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+       {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+       {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+       {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+       {0x0000b2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
+       {0x0000b2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
+       {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
+       {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+       {0x00016044, 0x056d82e4, 0x056d82e4, 0x056d82e4, 0x056d82e4},
+       {0x00016048, 0x8db49060, 0x8db49060, 0x8db49060, 0x8db49060},
+       {0x00016054, 0x6db60000, 0x6db60000, 0x6db60000, 0x6db60000},
+       {0x00016444, 0x056d82e4, 0x056d82e4, 0x056d82e4, 0x056d82e4},
+       {0x00016448, 0x8db49000, 0x8db49000, 0x8db49000, 0x8db49000},
+       {0x00016454, 0x6db60000, 0x6db60000, 0x6db60000, 0x6db60000},
+};
+
+static const u32 ar9462_2p1_modes_mix_ob_db_tx_gain[][5] = {
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+       {0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
+       {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
+       {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
+       {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+       {0x0000a410, 0x0000d0da, 0x0000d0da, 0x0000d0de, 0x0000d0de},
+       {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+       {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
+       {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
+       {0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
+       {0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
+       {0x0000a514, 0x18022622, 0x18022622, 0x12000400, 0x12000400},
+       {0x0000a518, 0x1b022822, 0x1b022822, 0x16000402, 0x16000402},
+       {0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
+       {0x0000a520, 0x22022c41, 0x22022c41, 0x1c000603, 0x1c000603},
+       {0x0000a524, 0x28023042, 0x28023042, 0x21000a02, 0x21000a02},
+       {0x0000a528, 0x2c023044, 0x2c023044, 0x25000a04, 0x25000a04},
+       {0x0000a52c, 0x2f023644, 0x2f023644, 0x28000a20, 0x28000a20},
+       {0x0000a530, 0x34025643, 0x34025643, 0x2c000e20, 0x2c000e20},
+       {0x0000a534, 0x38025a44, 0x38025a44, 0x30000e22, 0x30000e22},
+       {0x0000a538, 0x3b025e45, 0x3b025e45, 0x34000e24, 0x34000e24},
+       {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x38001640, 0x38001640},
+       {0x0000a540, 0x48025e6c, 0x48025e6c, 0x3c001660, 0x3c001660},
+       {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3f001861, 0x3f001861},
+       {0x0000a548, 0x55025eb3, 0x55025eb3, 0x43001a81, 0x43001a81},
+       {0x0000a54c, 0x58025ef3, 0x58025ef3, 0x47001a83, 0x47001a83},
+       {0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x4a001c84, 0x4a001c84},
+       {0x0000a554, 0x62025f56, 0x62025f56, 0x4e001ce3, 0x4e001ce3},
+       {0x0000a558, 0x66027f56, 0x66027f56, 0x52001ce5, 0x52001ce5},
+       {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x56001ce9, 0x56001ce9},
+       {0x0000a560, 0x70049f56, 0x70049f56, 0x5a001ceb, 0x5a001ceb},
+       {0x0000a564, 0x751ffff6, 0x751ffff6, 0x5c001eec, 0x5c001eec},
+       {0x0000a568, 0x751ffff6, 0x751ffff6, 0x5e001ef0, 0x5e001ef0},
+       {0x0000a56c, 0x751ffff6, 0x751ffff6, 0x60001ef4, 0x60001ef4},
+       {0x0000a570, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
+       {0x0000a574, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
+       {0x0000a578, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
+       {0x0000a57c, 0x751ffff6, 0x751ffff6, 0x62001ff6, 0x62001ff6},
+       {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
+       {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
+       {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
+       {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
+       {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
+       {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
+       {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
+       {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+       {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+       {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+       {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+       {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+       {0x0000b2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
+       {0x0000b2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
+       {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
+       {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+};
+
+static const u32 ar9462_2p1_modes_fast_clock[][3] = {
+       /* Addr      5G_HT20     5G_HT40   */
+       {0x00001030, 0x00000268, 0x000004d0},
+       {0x00001070, 0x0000018c, 0x00000318},
+       {0x000010b0, 0x00000fd0, 0x00001fa0},
+       {0x00008014, 0x044c044c, 0x08980898},
+       {0x0000801c, 0x148ec02b, 0x148ec057},
+       {0x00008318, 0x000044c0, 0x00008980},
+       {0x00009e00, 0x0372131c, 0x0372131c},
+       {0x0000a230, 0x0000400b, 0x00004016},
+       {0x0000a254, 0x00000898, 0x00001130},
+};
+
+static const u32 ar9462_2p1_baseband_core_txfir_coeff_japan_2484[][2] = {
+       /* Addr      allmodes  */
+       {0x0000a398, 0x00000000},
+       {0x0000a39c, 0x6f7f0301},
+       {0x0000a3a0, 0xca9228ee},
+};
+
+#endif /* INITVALS_9462_2P1_H */
index 42b03dc39d1423561dff8fb08f95be67f54ea5bb..c1224b5a257b86e6d9ab3ee8b7a38fa7d72fe7b3 100644
@@ -296,6 +296,7 @@ struct ath_tx {
        struct ath_txq txq[ATH9K_NUM_TX_QUEUES];
        struct ath_descdma txdma;
        struct ath_txq *txq_map[IEEE80211_NUM_ACS];
+       struct ath_txq *uapsdq;
        u32 txq_max_pending[IEEE80211_NUM_ACS];
        u16 max_aggr_framelen[IEEE80211_NUM_ACS][4][32];
 };
@@ -343,6 +344,8 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
 void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop);
 int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
                 struct ath_tx_control *txctl);
+void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                struct sk_buff *skb);
 void ath_tx_tasklet(struct ath_softc *sc);
 void ath_tx_edma_tasklet(struct ath_softc *sc);
 int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
@@ -353,6 +356,11 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid
 void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an);
 void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
                       struct ath_node *an);
+void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
+                                  struct ieee80211_sta *sta,
+                                  u16 tids, int nframes,
+                                  enum ieee80211_frame_release_type reason,
+                                  bool more_data);
 
 /********/
 /* VIFs */
@@ -623,6 +631,11 @@ void ath_ant_comb_update(struct ath_softc *sc);
 /* Main driver core */
 /********************/
 
+#define ATH9K_PCI_CUS198 0x0001
+#define ATH9K_PCI_CUS230 0x0002
+#define ATH9K_PCI_CUS217 0x0004
+#define ATH9K_PCI_WOW    0x0008
+
 /*
  * Default cache line size, in bytes.
  * Used when PCI device not fully initialized by bootrom/BIOS
@@ -642,6 +655,7 @@ enum sc_op_flags {
        SC_OP_ANI_RUN,
        SC_OP_PRIM_STA_VIF,
        SC_OP_HW_RESET,
+       SC_OP_SCANNING,
 };
 
 /* Powersave flags */
@@ -706,6 +720,7 @@ struct ath_softc {
 
        unsigned int hw_busy_count;
        unsigned long sc_flags;
+       unsigned long driver_data;
 
        u32 intrstatus;
        u16 ps_flags; /* PS_* */
@@ -755,7 +770,6 @@ struct ath_softc {
        struct rchan *rfs_chan_spec_scan;
        enum spectral_mode spectral_mode;
        struct ath_spec_scan spec_config;
-       int scanning;
 
 #ifdef CONFIG_PM_SLEEP
        atomic_t wow_got_bmiss_intr;
index 2ff570f7f8ffb49e563d0f7e6cb4d3bace3cc6da..1a17732bb089ca4b58e7d02880f4817b5f95a093 100644
@@ -39,7 +39,8 @@ static void ath9k_beaconq_config(struct ath_softc *sc)
 
        ath9k_hw_get_txq_props(ah, sc->beacon.beaconq, &qi);
 
-       if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) {
+       if (sc->sc_ah->opmode == NL80211_IFTYPE_AP ||
+           sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT) {
                /* Always burst out beacon and CAB traffic. */
                qi.tqi_aifs = 1;
                qi.tqi_cwmin = 0;
@@ -107,23 +108,6 @@ static void ath9k_beacon_setup(struct ath_softc *sc, struct ieee80211_vif *vif,
        ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
 }
 
-static void ath9k_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
-{
-       struct ath_softc *sc = hw->priv;
-       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-       struct ath_tx_control txctl;
-
-       memset(&txctl, 0, sizeof(struct ath_tx_control));
-       txctl.txq = sc->beacon.cabq;
-
-       ath_dbg(common, XMIT, "transmitting CABQ packet, skb: %p\n", skb);
-
-       if (ath_tx_start(hw, skb, &txctl) != 0) {
-               ath_dbg(common, XMIT, "CABQ TX failed\n");
-               ieee80211_free_txskb(hw, skb);
-       }
-}
-
 static struct ath_buf *ath9k_beacon_generate(struct ieee80211_hw *hw,
                                             struct ieee80211_vif *vif)
 {
@@ -205,10 +189,8 @@ static struct ath_buf *ath9k_beacon_generate(struct ieee80211_hw *hw,
 
        ath9k_beacon_setup(sc, vif, bf, info->control.rates[0].idx);
 
-       while (skb) {
-               ath9k_tx_cabq(hw, skb);
-               skb = ieee80211_get_buffered_bc(hw, vif);
-       }
+       if (skb)
+               ath_tx_cabq(hw, vif, skb);
 
        return bf;
 }
@@ -273,7 +255,8 @@ static int ath9k_beacon_choose_slot(struct ath_softc *sc)
        u64 tsf;
        int slot;
 
-       if (sc->sc_ah->opmode != NL80211_IFTYPE_AP) {
+       if (sc->sc_ah->opmode != NL80211_IFTYPE_AP &&
+           sc->sc_ah->opmode != NL80211_IFTYPE_MESH_POINT) {
                ath_dbg(common, BEACON, "slot 0, tsf: %llu\n",
                        ath9k_hw_gettsf64(sc->sc_ah));
                return 0;
@@ -765,10 +748,10 @@ void ath9k_set_beacon(struct ath_softc *sc)
 
        switch (sc->sc_ah->opmode) {
        case NL80211_IFTYPE_AP:
+       case NL80211_IFTYPE_MESH_POINT:
                ath9k_beacon_config_ap(sc, cur_conf);
                break;
        case NL80211_IFTYPE_ADHOC:
-       case NL80211_IFTYPE_MESH_POINT:
                ath9k_beacon_config_adhoc(sc, cur_conf);
                break;
        case NL80211_IFTYPE_STATION:
index 7304e7585009c03ec1faeaa7d013e5ded4eea5d3..5e8219a91e252b3d4a9ac2f5fda599caf4ae9ae8 100644
@@ -387,7 +387,6 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
 
        if (!caldata) {
                chan->noisefloor = nf;
-               ah->noise = ath9k_hw_getchan_noise(ah, chan);
                return false;
        }
 
index b37eb8d38811587a72bd575805c40c0680387c3f..2721f52f0177ba7a4402521328a6029c80ed57c4 100644
@@ -173,25 +173,69 @@ static const struct file_operations fops_rx_chainmask = {
        .llseek = default_llseek,
 };
 
-static ssize_t read_file_disable_ani(struct file *file, char __user *user_buf,
+static ssize_t read_file_ani(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
 {
        struct ath_softc *sc = file->private_data;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-       char buf[32];
-       unsigned int len;
+       struct ath_hw *ah = sc->sc_ah;
+       unsigned int len = 0, size = 1024;
+       ssize_t retval = 0;
+       char *buf;
 
-       len = sprintf(buf, "%d\n", common->disable_ani);
-       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+       buf = kzalloc(size, GFP_KERNEL);
+       if (buf == NULL)
+               return -ENOMEM;
+
+       if (common->disable_ani) {
+               len += snprintf(buf + len, size - len, "%s: %s\n",
+                               "ANI", "DISABLED");
+               goto exit;
+       }
+
+       len += snprintf(buf + len, size - len, "%15s: %s\n",
+                       "ANI", "ENABLED");
+       len += snprintf(buf + len, size - len, "%15s: %u\n",
+                       "ANI RESET", ah->stats.ast_ani_reset);
+       len += snprintf(buf + len, size - len, "%15s: %u\n",
+                       "SPUR UP", ah->stats.ast_ani_spurup);
+       len += snprintf(buf + len, size - len, "%15s: %u\n",
+                       "SPUR DOWN", ah->stats.ast_ani_spurup);
+       len += snprintf(buf + len, size - len, "%15s: %u\n",
+                       "OFDM WS-DET ON", ah->stats.ast_ani_ofdmon);
+       len += snprintf(buf + len, size - len, "%15s: %u\n",
+                       "OFDM WS-DET OFF", ah->stats.ast_ani_ofdmoff);
+       len += snprintf(buf + len, size - len, "%15s: %u\n",
+                       "MRC-CCK ON", ah->stats.ast_ani_ccklow);
+       len += snprintf(buf + len, size - len, "%15s: %u\n",
+                       "MRC-CCK OFF", ah->stats.ast_ani_cckhigh);
+       len += snprintf(buf + len, size - len, "%15s: %u\n",
+                       "FIR-STEP UP", ah->stats.ast_ani_stepup);
+       len += snprintf(buf + len, size - len, "%15s: %u\n",
+                       "FIR-STEP DOWN", ah->stats.ast_ani_stepdown);
+       len += snprintf(buf + len, size - len, "%15s: %u\n",
+                       "INV LISTENTIME", ah->stats.ast_ani_lneg_or_lzero);
+       len += snprintf(buf + len, size - len, "%15s: %u\n",
+                       "OFDM ERRORS", ah->stats.ast_ani_ofdmerrs);
+       len += snprintf(buf + len, size - len, "%15s: %u\n",
+                       "CCK ERRORS", ah->stats.ast_ani_cckerrs);
+exit:
+       if (len > size)
+               len = size;
+
+       retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+       kfree(buf);
+
+       return retval;
 }
 
-static ssize_t write_file_disable_ani(struct file *file,
-                                     const char __user *user_buf,
-                                     size_t count, loff_t *ppos)
+static ssize_t write_file_ani(struct file *file,
+                             const char __user *user_buf,
+                             size_t count, loff_t *ppos)
 {
        struct ath_softc *sc = file->private_data;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-       unsigned long disable_ani;
+       unsigned long ani;
        char buf[32];
        ssize_t len;
 
@@ -200,12 +244,15 @@ static ssize_t write_file_disable_ani(struct file *file,
                return -EFAULT;
 
        buf[len] = '\0';
-       if (strict_strtoul(buf, 0, &disable_ani))
+       if (strict_strtoul(buf, 0, &ani))
                return -EINVAL;
 
-       common->disable_ani = !!disable_ani;
+       if (ani < 0 || ani > 1)
+               return -EINVAL;
+
+       common->disable_ani = !ani;
 
-       if (disable_ani) {
+       if (common->disable_ani) {
                clear_bit(SC_OP_ANI_RUN, &sc->sc_flags);
                ath_stop_ani(sc);
        } else {
@@ -215,9 +262,9 @@ static ssize_t write_file_disable_ani(struct file *file,
        return count;
 }
 
-static const struct file_operations fops_disable_ani = {
-       .read = read_file_disable_ani,
-       .write = write_file_disable_ani,
+static const struct file_operations fops_ani = {
+       .read = read_file_ani,
+       .write = write_file_ani,
        .open = simple_open,
        .owner = THIS_MODULE,
        .llseek = default_llseek,
@@ -738,8 +785,6 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
                       struct ath_tx_status *ts, struct ath_txq *txq,
                       unsigned int flags)
 {
-#define TX_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].ts\
-                       [sc->debug.tsidx].c)
        int qnum = txq->axq_qnum;
 
        TX_STAT_INC(qnum, tx_pkts_all);
@@ -771,37 +816,6 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
                TX_STAT_INC(qnum, data_underrun);
        if (ts->ts_flags & ATH9K_TX_DELIM_UNDERRUN)
                TX_STAT_INC(qnum, delim_underrun);
-
-#ifdef CONFIG_ATH9K_MAC_DEBUG
-       spin_lock(&sc->debug.samp_lock);
-       TX_SAMP_DBG(jiffies) = jiffies;
-       TX_SAMP_DBG(rssi_ctl0) = ts->ts_rssi_ctl0;
-       TX_SAMP_DBG(rssi_ctl1) = ts->ts_rssi_ctl1;
-       TX_SAMP_DBG(rssi_ctl2) = ts->ts_rssi_ctl2;
-       TX_SAMP_DBG(rssi_ext0) = ts->ts_rssi_ext0;
-       TX_SAMP_DBG(rssi_ext1) = ts->ts_rssi_ext1;
-       TX_SAMP_DBG(rssi_ext2) = ts->ts_rssi_ext2;
-       TX_SAMP_DBG(rateindex) = ts->ts_rateindex;
-       TX_SAMP_DBG(isok) = !!(ts->ts_status & ATH9K_TXERR_MASK);
-       TX_SAMP_DBG(rts_fail_cnt) = ts->ts_shortretry;
-       TX_SAMP_DBG(data_fail_cnt) = ts->ts_longretry;
-       TX_SAMP_DBG(rssi) = ts->ts_rssi;
-       TX_SAMP_DBG(tid) = ts->tid;
-       TX_SAMP_DBG(qid) = ts->qid;
-
-       if (ts->ts_flags & ATH9K_TX_BA) {
-               TX_SAMP_DBG(ba_low) = ts->ba_low;
-               TX_SAMP_DBG(ba_high) = ts->ba_high;
-       } else {
-               TX_SAMP_DBG(ba_low) = 0;
-               TX_SAMP_DBG(ba_high) = 0;
-       }
-
-       sc->debug.tsidx = (sc->debug.tsidx + 1) % ATH_DBG_MAX_SAMPLES;
-       spin_unlock(&sc->debug.samp_lock);
-#endif
-
-#undef TX_SAMP_DBG
 }
 
 static const struct file_operations fops_xmit = {
@@ -915,8 +929,6 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
 void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
 {
 #define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++
-#define RX_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].rs\
-                       [sc->debug.rsidx].c)
 
        RX_STAT_INC(rx_pkts_all);
        sc->debug.stats.rxstats.rx_bytes_all += rs->rs_datalen;
@@ -940,27 +952,7 @@ void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
                        RX_PHY_ERR_INC(rs->rs_phyerr);
        }
 
-#ifdef CONFIG_ATH9K_MAC_DEBUG
-       spin_lock(&sc->debug.samp_lock);
-       RX_SAMP_DBG(jiffies) = jiffies;
-       RX_SAMP_DBG(rssi_ctl0) = rs->rs_rssi_ctl0;
-       RX_SAMP_DBG(rssi_ctl1) = rs->rs_rssi_ctl1;
-       RX_SAMP_DBG(rssi_ctl2) = rs->rs_rssi_ctl2;
-       RX_SAMP_DBG(rssi_ext0) = rs->rs_rssi_ext0;
-       RX_SAMP_DBG(rssi_ext1) = rs->rs_rssi_ext1;
-       RX_SAMP_DBG(rssi_ext2) = rs->rs_rssi_ext2;
-       RX_SAMP_DBG(antenna) = rs->rs_antenna;
-       RX_SAMP_DBG(rssi) = rs->rs_rssi;
-       RX_SAMP_DBG(rate) = rs->rs_rate;
-       RX_SAMP_DBG(is_mybeacon) = rs->is_mybeacon;
-
-       sc->debug.rsidx = (sc->debug.rsidx + 1) % ATH_DBG_MAX_SAMPLES;
-       spin_unlock(&sc->debug.samp_lock);
-
-#endif
-
 #undef RX_PHY_ERR_INC
-#undef RX_SAMP_DBG
 }
 
 static const struct file_operations fops_recv = {
@@ -1485,283 +1477,6 @@ static const struct file_operations fops_modal_eeprom = {
        .llseek = default_llseek,
 };
 
-#ifdef CONFIG_ATH9K_MAC_DEBUG
-
-void ath9k_debug_samp_bb_mac(struct ath_softc *sc)
-{
-#define ATH_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].c)
-       struct ath_hw *ah = sc->sc_ah;
-       struct ath_common *common = ath9k_hw_common(ah);
-       unsigned long flags;
-       int i;
-
-       ath9k_ps_wakeup(sc);
-
-       spin_lock_bh(&sc->debug.samp_lock);
-
-       spin_lock_irqsave(&common->cc_lock, flags);
-       ath_hw_cycle_counters_update(common);
-
-       ATH_SAMP_DBG(cc.cycles) = common->cc_ani.cycles;
-       ATH_SAMP_DBG(cc.rx_busy) = common->cc_ani.rx_busy;
-       ATH_SAMP_DBG(cc.rx_frame) = common->cc_ani.rx_frame;
-       ATH_SAMP_DBG(cc.tx_frame) = common->cc_ani.tx_frame;
-       spin_unlock_irqrestore(&common->cc_lock, flags);
-
-       ATH_SAMP_DBG(noise) = ah->noise;
-
-       REG_WRITE_D(ah, AR_MACMISC,
-                 ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
-                  (AR_MACMISC_MISC_OBS_BUS_1 <<
-                   AR_MACMISC_MISC_OBS_BUS_MSB_S)));
-
-       for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++)
-               ATH_SAMP_DBG(dma_dbg_reg_vals[i]) = REG_READ_D(ah,
-                               AR_DMADBG_0 + (i * sizeof(u32)));
-
-       ATH_SAMP_DBG(pcu_obs) = REG_READ_D(ah, AR_OBS_BUS_1);
-       ATH_SAMP_DBG(pcu_cr) = REG_READ_D(ah, AR_CR);
-
-       memcpy(ATH_SAMP_DBG(nfCalHist), sc->caldata.nfCalHist,
-                       sizeof(ATH_SAMP_DBG(nfCalHist)));
-
-       sc->debug.sampidx = (sc->debug.sampidx + 1) % ATH_DBG_MAX_SAMPLES;
-       spin_unlock_bh(&sc->debug.samp_lock);
-       ath9k_ps_restore(sc);
-
-#undef ATH_SAMP_DBG
-}
-
-static int open_file_bb_mac_samps(struct inode *inode, struct file *file)
-{
-#define ATH_SAMP_DBG(c) bb_mac_samp[sampidx].c
-       struct ath_softc *sc = inode->i_private;
-       struct ath_hw *ah = sc->sc_ah;
-       struct ath_common *common = ath9k_hw_common(ah);
-       struct ieee80211_conf *conf = &common->hw->conf;
-       struct ath_dbg_bb_mac_samp *bb_mac_samp;
-       struct ath9k_nfcal_hist *h;
-       int i, j, qcuOffset = 0, dcuOffset = 0;
-       u32 *qcuBase, *dcuBase, size = 30000, len = 0;
-       u32 sampidx = 0;
-       u8 *buf;
-       u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
-       u8 nread;
-
-       if (test_bit(SC_OP_INVALID, &sc->sc_flags))
-               return -EAGAIN;
-
-       buf = vmalloc(size);
-       if (!buf)
-               return -ENOMEM;
-       bb_mac_samp = vmalloc(sizeof(*bb_mac_samp) * ATH_DBG_MAX_SAMPLES);
-       if (!bb_mac_samp) {
-               vfree(buf);
-               return -ENOMEM;
-       }
-       /* Account the current state too */
-       ath9k_debug_samp_bb_mac(sc);
-
-       spin_lock_bh(&sc->debug.samp_lock);
-       memcpy(bb_mac_samp, sc->debug.bb_mac_samp,
-                       sizeof(*bb_mac_samp) * ATH_DBG_MAX_SAMPLES);
-       len += snprintf(buf + len, size - len,
-                       "Current Sample Index: %d\n", sc->debug.sampidx);
-       spin_unlock_bh(&sc->debug.samp_lock);
-
-       len += snprintf(buf + len, size - len,
-                       "Raw DMA Debug Dump:\n");
-       len += snprintf(buf + len, size - len, "Sample |\t");
-       for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++)
-               len += snprintf(buf + len, size - len, " DMA Reg%d |\t", i);
-       len += snprintf(buf + len, size - len, "\n");
-
-       for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
-               len += snprintf(buf + len, size - len, "%d\t", sampidx);
-
-               for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++)
-                       len += snprintf(buf + len, size - len, " %08x\t",
-                                       ATH_SAMP_DBG(dma_dbg_reg_vals[i]));
-               len += snprintf(buf + len, size - len, "\n");
-       }
-       len += snprintf(buf + len, size - len, "\n");
-
-       len += snprintf(buf + len, size - len,
-                       "Sample Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");
-       for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
-               qcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[0]);
-               dcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[4]);
-
-               for (i = 0; i < ATH9K_NUM_QUEUES; i++,
-                               qcuOffset += 4, dcuOffset += 5) {
-                       if (i == 8) {
-                               qcuOffset = 0;
-                               qcuBase++;
-                       }
-
-                       if (i == 6) {
-                               dcuOffset = 0;
-                               dcuBase++;
-                       }
-                       if (!sc->debug.stats.txstats[i].queued)
-                               continue;
-
-                       len += snprintf(buf + len, size - len,
-                               "%4d %7d    %2x      %1x     %2x         %2x\n",
-                               sampidx, i,
-                               (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
-                               (*qcuBase & (0x8 << qcuOffset)) >>
-                               (qcuOffset + 3),
-                               ATH_SAMP_DBG(dma_dbg_reg_vals[2]) &
-                               (0x7 << (i * 3)) >> (i * 3),
-                               (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
-               }
-               len += snprintf(buf + len, size - len, "\n");
-       }
-       len += snprintf(buf + len, size - len,
-                       "samp qcu_sh qcu_fh qcu_comp dcu_comp dcu_arb dcu_fp "
-                       "ch_idle_dur ch_idle_dur_val txfifo_val0 txfifo_val1 "
-                       "txfifo_dcu0 txfifo_dcu1 pcu_obs AR_CR\n");
-
-       for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
-               qcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[0]);
-               dcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[4]);
-
-               len += snprintf(buf + len, size - len, "%4d %5x %5x ", sampidx,
-                       (ATH_SAMP_DBG(dma_dbg_reg_vals[3]) & 0x003c0000) >> 18,
-                       (ATH_SAMP_DBG(dma_dbg_reg_vals[3]) & 0x03c00000) >> 22);
-               len += snprintf(buf + len, size - len, "%7x %8x ",
-                       (ATH_SAMP_DBG(dma_dbg_reg_vals[3]) & 0x1c000000) >> 26,
-                       (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x3));
-               len += snprintf(buf + len, size - len, "%7x %7x ",
-                       (ATH_SAMP_DBG(dma_dbg_reg_vals[5]) & 0x06000000) >> 25,
-                       (ATH_SAMP_DBG(dma_dbg_reg_vals[5]) & 0x38000000) >> 27);
-               len += snprintf(buf + len, size - len, "%7d %12d ",
-                       (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x000003fc) >> 2,
-                       (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x00000400) >> 10);
-               len += snprintf(buf + len, size - len, "%12d %12d ",
-                       (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x00000800) >> 11,
-                       (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x00001000) >> 12);
-               len += snprintf(buf + len, size - len, "%12d %12d ",
-                       (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x0001e000) >> 13,
-                       (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x001e0000) >> 17);
-               len += snprintf(buf + len, size - len, "0x%07x 0x%07x\n",
-                               ATH_SAMP_DBG(pcu_obs), ATH_SAMP_DBG(pcu_cr));
-       }
-
-       len += snprintf(buf + len, size - len,
-                       "Sample ChNoise Chain privNF #Reading Readings\n");
-       for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
-               h = ATH_SAMP_DBG(nfCalHist);
-               if (!ATH_SAMP_DBG(noise))
-                       continue;
-
-               for (i = 0; i < NUM_NF_READINGS; i++) {
-                       if (!(chainmask & (1 << i)) ||
-                           ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf)))
-                               continue;
-
-                       nread = AR_PHY_CCA_FILTERWINDOW_LENGTH -
-                               h[i].invalidNFcount;
-                       len += snprintf(buf + len, size - len,
-                                       "%4d %5d %4d\t   %d\t %d\t",
-                                       sampidx, ATH_SAMP_DBG(noise),
-                                       i, h[i].privNF, nread);
-                       for (j = 0; j < nread; j++)
-                               len += snprintf(buf + len, size - len,
-                                       " %d", h[i].nfCalBuffer[j]);
-                       len += snprintf(buf + len, size - len, "\n");
-               }
-       }
-       len += snprintf(buf + len, size - len, "\nCycle counters:\n"
-                       "Sample Total    Rxbusy   Rxframes Txframes\n");
-       for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
-               if (!ATH_SAMP_DBG(cc.cycles))
-                       continue;
-               len += snprintf(buf + len, size - len,
-                               "%4d %08x %08x %08x %08x\n",
-                               sampidx, ATH_SAMP_DBG(cc.cycles),
-                               ATH_SAMP_DBG(cc.rx_busy),
-                               ATH_SAMP_DBG(cc.rx_frame),
-                               ATH_SAMP_DBG(cc.tx_frame));
-       }
-
-       len += snprintf(buf + len, size - len, "Tx status Dump :\n");
-       len += snprintf(buf + len, size - len,
-                       "Sample rssi:- ctl0 ctl1 ctl2 ext0 ext1 ext2 comb "
-                       "isok rts_fail data_fail rate tid qid "
-                                       "ba_low  ba_high tx_before(ms)\n");
-       for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
-               for (i = 0; i < ATH_DBG_MAX_SAMPLES; i++) {
-                       if (!ATH_SAMP_DBG(ts[i].jiffies))
-                               continue;
-                       len += snprintf(buf + len, size - len, "%-14d"
-                               "%-4d %-4d %-4d %-4d %-4d %-4d %-4d %-4d %-8d "
-                               "%-9d %-4d %-3d %-3d %08x %08x %-11d\n",
-                               sampidx,
-                               ATH_SAMP_DBG(ts[i].rssi_ctl0),
-                               ATH_SAMP_DBG(ts[i].rssi_ctl1),
-                               ATH_SAMP_DBG(ts[i].rssi_ctl2),
-                               ATH_SAMP_DBG(ts[i].rssi_ext0),
-                               ATH_SAMP_DBG(ts[i].rssi_ext1),
-                               ATH_SAMP_DBG(ts[i].rssi_ext2),
-                               ATH_SAMP_DBG(ts[i].rssi),
-                               ATH_SAMP_DBG(ts[i].isok),
-                               ATH_SAMP_DBG(ts[i].rts_fail_cnt),
-                               ATH_SAMP_DBG(ts[i].data_fail_cnt),
-                               ATH_SAMP_DBG(ts[i].rateindex),
-                               ATH_SAMP_DBG(ts[i].tid),
-                               ATH_SAMP_DBG(ts[i].qid),
-                               ATH_SAMP_DBG(ts[i].ba_low),
-                               ATH_SAMP_DBG(ts[i].ba_high),
-                               jiffies_to_msecs(jiffies -
-                                       ATH_SAMP_DBG(ts[i].jiffies)));
-               }
-       }
-
-       len += snprintf(buf + len, size - len, "Rx status Dump :\n");
-       len += snprintf(buf + len, size - len, "Sample rssi:- ctl0 ctl1 ctl2 "
-                       "ext0 ext1 ext2 comb beacon ant rate rx_before(ms)\n");
-       for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
-               for (i = 0; i < ATH_DBG_MAX_SAMPLES; i++) {
-                       if (!ATH_SAMP_DBG(rs[i].jiffies))
-                               continue;
-                       len += snprintf(buf + len, size - len, "%-14d"
-                               "%-4d %-4d %-4d %-4d %-4d %-4d %-4d %-9s %-2d %02x %-13d\n",
-                               sampidx,
-                               ATH_SAMP_DBG(rs[i].rssi_ctl0),
-                               ATH_SAMP_DBG(rs[i].rssi_ctl1),
-                               ATH_SAMP_DBG(rs[i].rssi_ctl2),
-                               ATH_SAMP_DBG(rs[i].rssi_ext0),
-                               ATH_SAMP_DBG(rs[i].rssi_ext1),
-                               ATH_SAMP_DBG(rs[i].rssi_ext2),
-                               ATH_SAMP_DBG(rs[i].rssi),
-                               ATH_SAMP_DBG(rs[i].is_mybeacon) ?
-                               "True" : "False",
-                               ATH_SAMP_DBG(rs[i].antenna),
-                               ATH_SAMP_DBG(rs[i].rate),
-                               jiffies_to_msecs(jiffies -
-                                       ATH_SAMP_DBG(rs[i].jiffies)));
-               }
-       }
-
-       vfree(bb_mac_samp);
-       file->private_data = buf;
-
-       return 0;
-#undef ATH_SAMP_DBG
-}
-
-static const struct file_operations fops_samps = {
-       .open = open_file_bb_mac_samps,
-       .read = ath9k_debugfs_read_buf,
-       .release = ath9k_debugfs_release_buf,
-       .owner = THIS_MODULE,
-       .llseek = default_llseek,
-};
-
-#endif
-
 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
 static ssize_t read_file_btcoex(struct file *file, char __user *user_buf,
                                size_t count, loff_t *ppos)
@@ -2059,8 +1774,8 @@ int ath9k_init_debug(struct ath_hw *ah)
                            sc->debug.debugfs_phy, sc, &fops_rx_chainmask);
        debugfs_create_file("tx_chainmask", S_IRUSR | S_IWUSR,
                            sc->debug.debugfs_phy, sc, &fops_tx_chainmask);
-       debugfs_create_file("disable_ani", S_IRUSR | S_IWUSR,
-                           sc->debug.debugfs_phy, sc, &fops_disable_ani);
+       debugfs_create_file("ani", S_IRUSR | S_IWUSR,
+                           sc->debug.debugfs_phy, sc, &fops_ani);
        debugfs_create_bool("paprd", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
                            &sc->sc_ah->config.enable_paprd);
        debugfs_create_file("regidx", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
@@ -2095,11 +1810,6 @@ int ath9k_init_debug(struct ath_hw *ah)
        debugfs_create_file("spectral_fft_period", S_IRUSR | S_IWUSR,
                            sc->debug.debugfs_phy, sc,
                            &fops_spectral_fft_period);
-
-#ifdef CONFIG_ATH9K_MAC_DEBUG
-       debugfs_create_file("samples", S_IRUSR, sc->debug.debugfs_phy, sc,
-                           &fops_samps);
-#endif
        debugfs_create_u32("gpio_mask", S_IRUSR | S_IWUSR,
                           sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask);
        debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR,
index 9d49aab8b989fe733d98ea27793f2db01874d66a..fc679198a0f38049a385fb53ab25209b23945827 100644
@@ -251,56 +251,10 @@ struct ath_stats {
        u32 reset[__RESET_TYPE_MAX];
 };
 
-#define ATH_DBG_MAX_SAMPLES    10
-struct ath_dbg_bb_mac_samp {
-       u32 dma_dbg_reg_vals[ATH9K_NUM_DMA_DEBUG_REGS];
-       u32 pcu_obs, pcu_cr, noise;
-       struct {
-               u64 jiffies;
-               int8_t rssi_ctl0;
-               int8_t rssi_ctl1;
-               int8_t rssi_ctl2;
-               int8_t rssi_ext0;
-               int8_t rssi_ext1;
-               int8_t rssi_ext2;
-               int8_t rssi;
-               bool isok;
-               u8 rts_fail_cnt;
-               u8 data_fail_cnt;
-               u8 rateindex;
-               u8 qid;
-               u8 tid;
-               u32 ba_low;
-               u32 ba_high;
-       } ts[ATH_DBG_MAX_SAMPLES];
-       struct {
-               u64 jiffies;
-               int8_t rssi_ctl0;
-               int8_t rssi_ctl1;
-               int8_t rssi_ctl2;
-               int8_t rssi_ext0;
-               int8_t rssi_ext1;
-               int8_t rssi_ext2;
-               int8_t rssi;
-               bool is_mybeacon;
-               u8 antenna;
-               u8 rate;
-       } rs[ATH_DBG_MAX_SAMPLES];
-       struct ath_cycle_counters cc;
-       struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS];
-};
-
 struct ath9k_debug {
        struct dentry *debugfs_phy;
        u32 regidx;
        struct ath_stats stats;
-#ifdef CONFIG_ATH9K_MAC_DEBUG
-       spinlock_t samp_lock;
-       struct ath_dbg_bb_mac_samp bb_mac_samp[ATH_DBG_MAX_SAMPLES];
-       u8 sampidx;
-       u8 tsidx;
-       u8 rsidx;
-#endif
 };
 
 int ath9k_init_debug(struct ath_hw *ah);
@@ -364,17 +318,4 @@ static inline void ath_debug_stat_rx(struct ath_softc *sc,
 
 #endif /* CONFIG_ATH9K_DEBUGFS */
 
-#ifdef CONFIG_ATH9K_MAC_DEBUG
-
-void ath9k_debug_samp_bb_mac(struct ath_softc *sc);
-
-#else
-
-static inline void ath9k_debug_samp_bb_mac(struct ath_softc *sc)
-{
-}
-
-#endif
-
-
 #endif /* DEBUG_H */
index f5dda84176c3d4ae8277c069df06fa3bbc11922d..9e582e14da74609361b689e5f5741d425fd14659 100644 (file)
@@ -234,10 +234,15 @@ static inline void ath9k_skb_queue_complete(struct hif_device_usb *hif_dev,
        struct sk_buff *skb;
 
        while ((skb = __skb_dequeue(queue)) != NULL) {
+#ifdef CONFIG_ATH9K_HTC_DEBUGFS
+               int ln = skb->len;
+#endif
                ath9k_htc_txcompletion_cb(hif_dev->htc_handle,
                                          skb, txok);
-               if (txok)
+               if (txok) {
                        TX_STAT_INC(skb_success);
+                       TX_STAT_ADD(skb_success_bytes, ln);
+               }
                else
                        TX_STAT_INC(skb_failed);
        }
@@ -620,6 +625,7 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
 
 err:
        for (i = 0; i < pool_index; i++) {
+               RX_STAT_ADD(skb_completed_bytes, skb_pool[i]->len);
                ath9k_htc_rx_msg(hif_dev->htc_handle, skb_pool[i],
                                 skb_pool[i]->len, USB_WLAN_RX_PIPE);
                RX_STAT_INC(skb_completed);
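
In the TX completion hunk above, skb->len is copied into a local variable before ath9k_htc_txcompletion_cb() runs, because the callback may consume the skb before the byte counter is updated. A self-contained sketch of that capture-before-handoff pattern follows; all types and names are invented for illustration.

/*
 * Minimal sketch of the capture-before-handoff pattern: once a buffer
 * has been handed to its completion routine it may already be freed,
 * so any field needed for statistics is copied out first.
 */
#include <stdio.h>
#include <stdlib.h>

struct buf { size_t len; char *data; };

static size_t tx_bytes;

static void complete_and_free(struct buf *b)
{
	free(b->data);
	free(b);
}

static void account_and_complete(struct buf *b, int ok)
{
	size_t len = b->len;	/* copy before ownership is transferred */

	complete_and_free(b);	/* b must not be dereferenced after this */
	if (ok)
		tx_bytes += len;
}

int main(void)
{
	struct buf *b = calloc(1, sizeof(*b));

	if (!b)
		return 1;
	b->len = 1500;
	account_and_complete(b, 1);
	printf("tx_bytes=%zu\n", tx_bytes);
	return 0;
}
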
index d3b099d7898b692b181029c808d41969554035ee..055d7c25e090bd33ce70de75c8352dc22dbb4003 100644 (file)
@@ -142,6 +142,7 @@ struct ath9k_htc_target_aggr {
 #define WLAN_RC_40_FLAG  0x02
 #define WLAN_RC_SGI_FLAG 0x04
 #define WLAN_RC_HT_FLAG  0x08
+#define ATH_RC_TX_STBC_FLAG 0x20
 
 struct ath9k_htc_rateset {
        u8 rs_nrates;
@@ -208,6 +209,9 @@ struct ath9k_htc_target_rx_stats {
                case NL80211_IFTYPE_AP:         \
                        _priv->num_ap_vif++;    \
                        break;                  \
+               case NL80211_IFTYPE_MESH_POINT: \
+                       _priv->num_mbss_vif++;  \
+                       break;                  \
                default:                        \
                        break;                  \
                }                               \
@@ -224,6 +228,9 @@ struct ath9k_htc_target_rx_stats {
                case NL80211_IFTYPE_AP:         \
                        _priv->num_ap_vif--;    \
                        break;                  \
+               case NL80211_IFTYPE_MESH_POINT: \
+                       _priv->num_mbss_vif--;  \
+                       break;                  \
                default:                        \
                        break;                  \
                }                               \
@@ -317,7 +324,9 @@ static inline struct ath9k_htc_tx_ctl *HTC_SKB_CB(struct sk_buff *skb)
 #ifdef CONFIG_ATH9K_HTC_DEBUGFS
 
 #define TX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c++)
+#define TX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c += a)
 #define RX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.rx_stats.c++)
+#define RX_STAT_ADD(c, a) (hif_dev->htc_handle->drv_priv->debug.rx_stats.c += a)
 #define CAB_STAT_INC   priv->debug.tx_stats.cab_queued++
 
 #define TX_QSTAT_INC(q) (priv->debug.tx_stats.queue_stats[q]++)
@@ -330,6 +339,7 @@ struct ath_tx_stats {
        u32 buf_completed;
        u32 skb_queued;
        u32 skb_success;
+       u32 skb_success_bytes;
        u32 skb_failed;
        u32 cab_queued;
        u32 queue_stats[IEEE80211_NUM_ACS];
@@ -338,6 +348,7 @@ struct ath_tx_stats {
 struct ath_rx_stats {
        u32 skb_allocated;
        u32 skb_completed;
+       u32 skb_completed_bytes;
        u32 skb_dropped;
        u32 err_crc;
        u32 err_decrypt_crc;
@@ -355,10 +366,20 @@ struct ath9k_debug {
        struct ath_rx_stats rx_stats;
 };
 
+void ath9k_htc_get_et_strings(struct ieee80211_hw *hw,
+                             struct ieee80211_vif *vif,
+                             u32 sset, u8 *data);
+int ath9k_htc_get_et_sset_count(struct ieee80211_hw *hw,
+                               struct ieee80211_vif *vif, int sset);
+void ath9k_htc_get_et_stats(struct ieee80211_hw *hw,
+                           struct ieee80211_vif *vif,
+                           struct ethtool_stats *stats, u64 *data);
 #else
 
 #define TX_STAT_INC(c) do { } while (0)
+#define TX_STAT_ADD(c, a) do { } while (0)
 #define RX_STAT_INC(c) do { } while (0)
+#define RX_STAT_ADD(c, a) do { } while (0)
 #define CAB_STAT_INC   do { } while (0)
 
 #define TX_QSTAT_INC(c) do { } while (0)
@@ -450,6 +471,7 @@ struct ath9k_htc_priv {
        u8 sta_slot;
        u8 vif_sta_pos[ATH9K_HTC_MAX_VIF];
        u8 num_ibss_vif;
+       u8 num_mbss_vif;
        u8 num_sta_vif;
        u8 num_sta_assoc_vif;
        u8 num_ap_vif;
@@ -575,6 +597,8 @@ bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
 void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv);
 void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw);
 
+struct base_eep_header *ath9k_htc_get_eeprom_base(struct ath9k_htc_priv *priv);
+
 #ifdef CONFIG_MAC80211_LEDS
 void ath9k_init_leds(struct ath9k_htc_priv *priv);
 void ath9k_deinit_leds(struct ath9k_htc_priv *priv);
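
The new TX_STAT_ADD()/RX_STAT_ADD() macros follow the existing convention: with CONFIG_ATH9K_HTC_DEBUGFS disabled they expand to empty statements, so call sites never need #ifdef guards. A small standalone sketch of the same idea, with illustrative names only:

/* Sketch of the conditional statistics-macro pattern. With DEBUG_STATS
 * defined the macros update counters; without it they compile away.
 */
#include <stdio.h>

#define DEBUG_STATS 1

struct tx_stats { unsigned long skb_success, skb_success_bytes; };
static struct tx_stats stats;

#ifdef DEBUG_STATS
#define TX_STAT_INC(c)		(stats.c++)
#define TX_STAT_ADD(c, a)	(stats.c += (a))
#else
#define TX_STAT_INC(c)		do { } while (0)
#define TX_STAT_ADD(c, a)	do { } while (0)
#endif

int main(void)
{
	TX_STAT_INC(skb_success);
	TX_STAT_ADD(skb_success_bytes, 1500);
	printf("%lu pkts, %lu bytes\n",
	       stats.skb_success, stats.skb_success_bytes);
	return 0;
}
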
index f13f458dd65674a364d51b6616e88e1c7e358610..e0c03bd641821a165d8fd922bedd14c5f685d321 100644 (file)
@@ -28,7 +28,8 @@ void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv)
 
        ath9k_hw_get_txq_props(ah, priv->beaconq, &qi);
 
-       if (priv->ah->opmode == NL80211_IFTYPE_AP) {
+       if (priv->ah->opmode == NL80211_IFTYPE_AP ||
+           priv->ah->opmode == NL80211_IFTYPE_MESH_POINT) {
                qi.tqi_aifs = 1;
                qi.tqi_cwmin = 0;
                qi.tqi_cwmax = 0;
@@ -628,6 +629,7 @@ void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
        case NL80211_IFTYPE_ADHOC:
                ath9k_htc_beacon_config_adhoc(priv, cur_conf);
                break;
+       case NL80211_IFTYPE_MESH_POINT:
        case NL80211_IFTYPE_AP:
                ath9k_htc_beacon_config_ap(priv, cur_conf);
                break;
@@ -649,6 +651,7 @@ void ath9k_htc_beacon_reconfig(struct ath9k_htc_priv *priv)
        case NL80211_IFTYPE_ADHOC:
                ath9k_htc_beacon_config_adhoc(priv, cur_conf);
                break;
+       case NL80211_IFTYPE_MESH_POINT:
        case NL80211_IFTYPE_AP:
                ath9k_htc_beacon_config_ap(priv, cur_conf);
                break;
index 87110de577effcafa57cbfba7adad21b67bc2973..7416d58a122c2ad9b22ebcdc7fbbc64c74b2ad02 100644 (file)
@@ -496,21 +496,7 @@ static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
        ssize_t retval = 0;
        char *buf;
 
-       /*
-        * This can be done since all the 3 EEPROM families have the
-        * same base header upto a certain point, and we are interested in
-        * the data only upto that point.
-        */
-
-       if (AR_SREV_9271(priv->ah))
-               pBase = (struct base_eep_header *)
-                       &priv->ah->eeprom.map4k.baseEepHeader;
-       else if (priv->ah->hw_version.usbdev == AR9280_USB)
-               pBase = (struct base_eep_header *)
-                       &priv->ah->eeprom.def.baseEepHeader;
-       else if (priv->ah->hw_version.usbdev == AR9287_USB)
-               pBase = (struct base_eep_header *)
-                       &priv->ah->eeprom.map9287.baseEepHeader;
+       pBase = ath9k_htc_get_eeprom_base(priv);
 
        if (pBase == NULL) {
                ath_err(common, "Unknown EEPROM type\n");
@@ -916,6 +902,87 @@ static const struct file_operations fops_modal_eeprom = {
        .llseek = default_llseek,
 };
 
+
+/* Ethtool support for get-stats */
+#define AMKSTR(nm) #nm "_BE", #nm "_BK", #nm "_VI", #nm "_VO"
+static const char ath9k_htc_gstrings_stats[][ETH_GSTRING_LEN] = {
+       "tx_pkts_nic",
+       "tx_bytes_nic",
+       "rx_pkts_nic",
+       "rx_bytes_nic",
+
+       AMKSTR(d_tx_pkts),
+
+       "d_rx_crc_err",
+       "d_rx_decrypt_crc_err",
+       "d_rx_phy_err",
+       "d_rx_mic_err",
+       "d_rx_pre_delim_crc_err",
+       "d_rx_post_delim_crc_err",
+       "d_rx_decrypt_busy_err",
+
+       "d_rx_phyerr_radar",
+       "d_rx_phyerr_ofdm_timing",
+       "d_rx_phyerr_cck_timing",
+
+};
+#define ATH9K_HTC_SSTATS_LEN ARRAY_SIZE(ath9k_htc_gstrings_stats)
+
+void ath9k_htc_get_et_strings(struct ieee80211_hw *hw,
+                             struct ieee80211_vif *vif,
+                             u32 sset, u8 *data)
+{
+       if (sset == ETH_SS_STATS)
+               memcpy(data, *ath9k_htc_gstrings_stats,
+                      sizeof(ath9k_htc_gstrings_stats));
+}
+
+int ath9k_htc_get_et_sset_count(struct ieee80211_hw *hw,
+                               struct ieee80211_vif *vif, int sset)
+{
+       if (sset == ETH_SS_STATS)
+               return ATH9K_HTC_SSTATS_LEN;
+       return 0;
+}
+
+#define STXBASE priv->debug.tx_stats
+#define SRXBASE priv->debug.rx_stats
+#define ASTXQ(a)                                       \
+       data[i++] = STXBASE.a[IEEE80211_AC_BE];         \
+       data[i++] = STXBASE.a[IEEE80211_AC_BK];         \
+       data[i++] = STXBASE.a[IEEE80211_AC_VI];         \
+       data[i++] = STXBASE.a[IEEE80211_AC_VO]
+
+void ath9k_htc_get_et_stats(struct ieee80211_hw *hw,
+                           struct ieee80211_vif *vif,
+                           struct ethtool_stats *stats, u64 *data)
+{
+       struct ath9k_htc_priv *priv = hw->priv;
+       int i = 0;
+
+       data[i++] = STXBASE.skb_success;
+       data[i++] = STXBASE.skb_success_bytes;
+       data[i++] = SRXBASE.skb_completed;
+       data[i++] = SRXBASE.skb_completed_bytes;
+
+       ASTXQ(queue_stats);
+
+       data[i++] = SRXBASE.err_crc;
+       data[i++] = SRXBASE.err_decrypt_crc;
+       data[i++] = SRXBASE.err_phy;
+       data[i++] = SRXBASE.err_mic;
+       data[i++] = SRXBASE.err_pre_delim;
+       data[i++] = SRXBASE.err_post_delim;
+       data[i++] = SRXBASE.err_decrypt_busy;
+
+       data[i++] = SRXBASE.err_phy_stats[ATH9K_PHYERR_RADAR];
+       data[i++] = SRXBASE.err_phy_stats[ATH9K_PHYERR_OFDM_TIMING];
+       data[i++] = SRXBASE.err_phy_stats[ATH9K_PHYERR_CCK_TIMING];
+
+       WARN_ON(i != ATH9K_HTC_SSTATS_LEN);
+}
+
+
 int ath9k_htc_init_debug(struct ath_hw *ah)
 {
        struct ath_common *common = ath9k_hw_common(ah);
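
The ethtool hooks above rely on the string table and the value array being filled in the same order and to the same length, which is what the WARN_ON(i != ATH9K_HTC_SSTATS_LEN) guards; once wired into the mac80211 ops these counters would typically be visible via `ethtool -S` on the interface. A simplified standalone sketch of that parallel-array invariant, with invented names and values:

/*
 * Simplified sketch of the string-table / value-array invariant behind
 * the ethtool callbacks above.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GSTRING_LEN 32

static const char gstrings[][GSTRING_LEN] = {
	"tx_pkts_nic", "tx_bytes_nic", "rx_pkts_nic", "rx_bytes_nic",
};
#define N_STATS (sizeof(gstrings) / sizeof(gstrings[0]))

static void get_stats(uint64_t *data)
{
	size_t i = 0;

	data[i++] = 100;	/* tx_pkts_nic  */
	data[i++] = 150000;	/* tx_bytes_nic */
	data[i++] = 80;		/* rx_pkts_nic  */
	data[i++] = 96000;	/* rx_bytes_nic */
	assert(i == N_STATS);	/* mirrors the driver's WARN_ON() */
}

int main(void)
{
	uint64_t data[N_STATS];

	get_stats(data);
	for (size_t i = 0; i < N_STATS; i++)
		printf("%-16s %llu\n", gstrings[i],
		       (unsigned long long)data[i]);
	return 0;
}
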
index a47f5e05fc04835d6b747b8ba4553c91d6614178..925c5b0b1cdeb9f1c4137c3f5a9c871a9f28b43f 100644 (file)
@@ -517,6 +517,9 @@ static void setup_ht_cap(struct ath9k_htc_priv *priv,
        ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n",
                tx_streams, rx_streams);
 
+       if (tx_streams >= 2)
+               ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
+
        if (tx_streams != rx_streams) {
                ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
                ht_info->mcs.tx_params |= ((tx_streams - 1) <<
@@ -698,7 +701,8 @@ static const struct ieee80211_iface_limit if_limits[] = {
        { .max = 2,     .types = BIT(NL80211_IFTYPE_STATION) |
                                 BIT(NL80211_IFTYPE_P2P_CLIENT) },
        { .max = 2,     .types = BIT(NL80211_IFTYPE_AP) |
-                                BIT(NL80211_IFTYPE_P2P_GO) },
+                                BIT(NL80211_IFTYPE_P2P_GO) |
+                                BIT(NL80211_IFTYPE_MESH_POINT) },
 };
 
 static const struct ieee80211_iface_combination if_comb = {
@@ -712,6 +716,7 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
                               struct ieee80211_hw *hw)
 {
        struct ath_common *common = ath9k_hw_common(priv->ah);
+       struct base_eep_header *pBase;
 
        hw->flags = IEEE80211_HW_SIGNAL_DBM |
                IEEE80211_HW_AMPDU_AGGREGATION |
@@ -721,6 +726,7 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
                IEEE80211_HW_SUPPORTS_PS |
                IEEE80211_HW_PS_NULLFUNC_STACK |
                IEEE80211_HW_REPORTS_TX_ACK_STATUS |
+               IEEE80211_HW_MFP_CAPABLE |
                IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
 
        hw->wiphy->interface_modes =
@@ -728,7 +734,8 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
                BIT(NL80211_IFTYPE_ADHOC) |
                BIT(NL80211_IFTYPE_AP) |
                BIT(NL80211_IFTYPE_P2P_GO) |
-               BIT(NL80211_IFTYPE_P2P_CLIENT);
+               BIT(NL80211_IFTYPE_P2P_CLIENT) |
+               BIT(NL80211_IFTYPE_MESH_POINT);
 
        hw->wiphy->iface_combinations = &if_comb;
        hw->wiphy->n_iface_combinations = 1;
@@ -765,6 +772,12 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
                                     &priv->sbands[IEEE80211_BAND_5GHZ].ht_cap);
        }
 
+       pBase = ath9k_htc_get_eeprom_base(priv);
+       if (pBase) {
+               hw->wiphy->available_antennas_rx = pBase->rxMask;
+               hw->wiphy->available_antennas_tx = pBase->txMask;
+       }
+
        SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
 }
 
index 62f1b7636c9246c1d7a77b7c7bf28f8db2f17a6b..5c1bec18c9e3bcd51b501de76ee83e4e8ca2aa90 100644 (file)
@@ -113,7 +113,9 @@ static void ath9k_htc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
        struct ath9k_htc_priv *priv = data;
        struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
 
-       if ((vif->type == NL80211_IFTYPE_AP) && bss_conf->enable_beacon)
+       if ((vif->type == NL80211_IFTYPE_AP ||
+            vif->type == NL80211_IFTYPE_MESH_POINT) &&
+           bss_conf->enable_beacon)
                priv->reconfig_beacon = true;
 
        if (bss_conf->assoc) {
@@ -180,6 +182,8 @@ static void ath9k_htc_set_opmode(struct ath9k_htc_priv *priv)
                priv->ah->opmode = NL80211_IFTYPE_ADHOC;
        else if (priv->num_ap_vif)
                priv->ah->opmode = NL80211_IFTYPE_AP;
+       else if (priv->num_mbss_vif)
+               priv->ah->opmode = NL80211_IFTYPE_MESH_POINT;
        else
                priv->ah->opmode = NL80211_IFTYPE_STATION;
 
@@ -623,6 +627,8 @@ static void ath9k_htc_setup_rate(struct ath9k_htc_priv *priv,
                trate->rates.ht_rates.rs_nrates = j;
 
                caps = WLAN_RC_HT_FLAG;
+               if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
+                       caps |= ATH_RC_TX_STBC_FLAG;
                if (sta->ht_cap.mcs.rx_mask[1])
                        caps |= WLAN_RC_DS_FLAG;
                if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) &&
@@ -810,8 +816,7 @@ void ath9k_htc_ani_work(struct work_struct *work)
        }
 
        /* Verify whether we must check ANI */
-       if (ah->config.enable_ani &&
-           (timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
+       if ((timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
                aniflag = true;
                common->ani.checkani_timer = timestamp;
        }
@@ -841,8 +846,7 @@ set_timer:
        * short calibration and long calibration.
        */
        cal_interval = ATH_LONG_CALINTERVAL;
-       if (ah->config.enable_ani)
-               cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
+       cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
        if (!common->ani.caldone)
                cal_interval = min(cal_interval, (u32)short_cal_interval);
 
@@ -1052,6 +1056,9 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
        case NL80211_IFTYPE_AP:
                hvif.opmode = HTC_M_HOSTAP;
                break;
+       case NL80211_IFTYPE_MESH_POINT:
+               hvif.opmode = HTC_M_WDS;        /* close enough */
+               break;
        default:
                ath_err(common,
                        "Interface type %d not yet supported\n", vif->type);
@@ -1084,6 +1091,7 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
        INC_VIF(priv, vif->type);
 
        if ((vif->type == NL80211_IFTYPE_AP) ||
+           (vif->type == NL80211_IFTYPE_MESH_POINT) ||
            (vif->type == NL80211_IFTYPE_ADHOC))
                ath9k_htc_assign_bslot(priv, vif);
 
@@ -1134,6 +1142,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
        DEC_VIF(priv, vif->type);
 
        if ((vif->type == NL80211_IFTYPE_AP) ||
+            vif->type == NL80211_IFTYPE_MESH_POINT ||
            (vif->type == NL80211_IFTYPE_ADHOC))
                ath9k_htc_remove_bslot(priv, vif);
 
@@ -1525,9 +1534,10 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
        if ((changed & BSS_CHANGED_BEACON_ENABLED) && !bss_conf->enable_beacon) {
                /*
                 * Disable SWBA interrupt only if there are no
-                * AP/IBSS interfaces.
+                * concurrent AP/mesh or IBSS interfaces.
                 */
-               if ((priv->num_ap_vif <= 1) || priv->num_ibss_vif) {
+               if ((priv->num_ap_vif + priv->num_mbss_vif <= 1) ||
+                    priv->num_ibss_vif) {
                        ath_dbg(common, CONFIG,
                                "Beacon disabled for BSS: %pM\n",
                                bss_conf->bssid);
@@ -1538,12 +1548,15 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
 
        if (changed & BSS_CHANGED_BEACON_INT) {
                /*
-                * Reset the HW TSF for the first AP interface.
+                * Reset the HW TSF for the first AP or mesh interface.
                 */
-               if ((priv->ah->opmode == NL80211_IFTYPE_AP) &&
-                   (priv->nvifs == 1) &&
-                   (priv->num_ap_vif == 1) &&
-                   (vif->type == NL80211_IFTYPE_AP)) {
+               if (priv->nvifs == 1 &&
+                   ((priv->ah->opmode == NL80211_IFTYPE_AP &&
+                     vif->type == NL80211_IFTYPE_AP &&
+                     priv->num_ap_vif == 1) ||
+                   (priv->ah->opmode == NL80211_IFTYPE_MESH_POINT &&
+                     vif->type == NL80211_IFTYPE_MESH_POINT &&
+                     priv->num_mbss_vif == 1))) {
                        set_bit(OP_TSF_RESET, &priv->op_flags);
                }
                ath_dbg(common, CONFIG,
@@ -1761,6 +1774,43 @@ static int ath9k_htc_get_stats(struct ieee80211_hw *hw,
        return 0;
 }
 
+struct base_eep_header *ath9k_htc_get_eeprom_base(struct ath9k_htc_priv *priv)
+{
+       struct base_eep_header *pBase = NULL;
+       /*
+        * This can be done since all three EEPROM families have the
+        * same base header up to a certain point, and we are interested
+        * in the data only up to that point.
+        */
+
+       if (AR_SREV_9271(priv->ah))
+               pBase = (struct base_eep_header *)
+                       &priv->ah->eeprom.map4k.baseEepHeader;
+       else if (priv->ah->hw_version.usbdev == AR9280_USB)
+               pBase = (struct base_eep_header *)
+                       &priv->ah->eeprom.def.baseEepHeader;
+       else if (priv->ah->hw_version.usbdev == AR9287_USB)
+               pBase = (struct base_eep_header *)
+                       &priv->ah->eeprom.map9287.baseEepHeader;
+       return pBase;
+}
+
+
+static int ath9k_htc_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant,
+                                u32 *rx_ant)
+{
+       struct ath9k_htc_priv *priv = hw->priv;
+       struct base_eep_header *pBase = ath9k_htc_get_eeprom_base(priv);
+       if (pBase) {
+               *tx_ant = pBase->txMask;
+               *rx_ant = pBase->rxMask;
+       } else {
+               *tx_ant = 0;
+               *rx_ant = 0;
+       }
+       return 0;
+}
+
 struct ieee80211_ops ath9k_htc_ops = {
        .tx                 = ath9k_htc_tx,
        .start              = ath9k_htc_start,
@@ -1786,4 +1836,11 @@ struct ieee80211_ops ath9k_htc_ops = {
        .set_coverage_class = ath9k_htc_set_coverage_class,
        .set_bitrate_mask   = ath9k_htc_set_bitrate_mask,
        .get_stats          = ath9k_htc_get_stats,
+       .get_antenna        = ath9k_htc_get_antenna,
+
+#ifdef CONFIG_ATH9K_HTC_DEBUGFS
+       .get_et_sset_count  = ath9k_htc_get_et_sset_count,
+       .get_et_stats       = ath9k_htc_get_et_stats,
+       .get_et_strings     = ath9k_htc_get_et_strings,
+#endif
 };
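
The new ath9k_htc_get_antenna() callback reports the EEPROM txMask/rxMask values directly; these are chainmasks in which each set bit usually corresponds to one radio chain. A small sketch of that interpretation, with invented mask values:

/* Sketch of how a chainmask such as pBase->txMask is typically read:
 * each set bit is one radio chain.
 */
#include <stdio.h>

static int count_chains(unsigned int mask)
{
	int n = 0;

	while (mask) {
		n += mask & 1;
		mask >>= 1;
	}
	return n;
}

int main(void)
{
	unsigned int tx_mask = 0x3;	/* chains 0 and 1    */
	unsigned int rx_mask = 0x7;	/* chains 0, 1 and 2 */

	printf("tx chains: %d, rx chains: %d\n",
	       count_chains(tx_mask), count_chains(rx_mask));
	return 0;
}
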
index 6bd0e92ea2aaee7144aaa4c74fae7a8ff43a42c0..e602c951970906d5fb5edeb3bf176bbf5689df53 100644 (file)
@@ -887,7 +887,7 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv)
        if (priv->rxfilter & FIF_PSPOLL)
                rfilt |= ATH9K_RX_FILTER_PSPOLL;
 
-       if (priv->nvifs > 1)
+       if (priv->nvifs > 1 || priv->rxfilter & FIF_OTHER_BSS)
                rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
 
        return rfilt;
index 15dfefcf2d0fca7ef80f3eb64752c47c0bea5d45..4ca0cb06010609ca490bfe4404883bde2ac85d66 100644 (file)
@@ -452,7 +452,6 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
        ah->config.pcie_clock_req = 0;
        ah->config.pcie_waen = 0;
        ah->config.analog_shiftreg = 1;
-       ah->config.enable_ani = true;
 
        for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
                ah->config.spurchans[i][0] = AR_NO_SPUR;
@@ -549,8 +548,7 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
                ah->eep_ops->get_eeprom_ver(ah),
                ah->eep_ops->get_eeprom_rev(ah));
 
-       if (ah->config.enable_ani)
-               ath9k_hw_ani_init(ah);
+       ath9k_hw_ani_init(ah);
 
        return 0;
 }
@@ -1250,10 +1248,10 @@ static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
 
        switch (opmode) {
        case NL80211_IFTYPE_ADHOC:
-       case NL80211_IFTYPE_MESH_POINT:
                set |= AR_STA_ID1_ADHOC;
                REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
                break;
+       case NL80211_IFTYPE_MESH_POINT:
        case NL80211_IFTYPE_AP:
                set |= AR_STA_ID1_STA_AP;
                /* fall through */
@@ -1872,7 +1870,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
 
        ah->caldata = caldata;
        if (caldata && (chan->channel != caldata->channel ||
-                       chan->channelFlags != caldata->channelFlags)) {
+                       chan->channelFlags != caldata->channelFlags ||
+                       chan->chanmode != caldata->chanmode)) {
                /* Operating channel changed, reset channel calibration data */
                memset(caldata, 0, sizeof(*caldata));
                ath9k_init_nfcal_hist_buffer(ah, chan);
@@ -2255,12 +2254,12 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
 
        switch (ah->opmode) {
        case NL80211_IFTYPE_ADHOC:
-       case NL80211_IFTYPE_MESH_POINT:
                REG_SET_BIT(ah, AR_TXCFG,
                            AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY);
                REG_WRITE(ah, AR_NEXT_NDP_TIMER, next_beacon +
                          TU_TO_USEC(ah->atim_window ? ah->atim_window : 1));
                flags |= AR_NDP_TIMER_EN;
+       case NL80211_IFTYPE_MESH_POINT:
        case NL80211_IFTYPE_AP:
                REG_WRITE(ah, AR_NEXT_TBTT_TIMER, next_beacon);
                REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, next_beacon -
@@ -2600,17 +2599,12 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
                if (!(ah->ent_mode & AR_ENT_OTP_49GHZ_DISABLE))
                        pCap->hw_caps |= ATH9K_HW_CAP_MCI;
 
-               if (AR_SREV_9462_20(ah))
+               if (AR_SREV_9462_20_OR_LATER(ah))
                        pCap->hw_caps |= ATH9K_HW_CAP_RTT;
        }
 
-       if (AR_SREV_9280_20_OR_LATER(ah)) {
-               pCap->hw_caps |= ATH9K_HW_WOW_DEVICE_CAPABLE |
-                                ATH9K_HW_WOW_PATTERN_MATCH_EXACT;
-
-               if (AR_SREV_9280(ah))
-                       pCap->hw_caps |= ATH9K_HW_WOW_PATTERN_MATCH_DWORD;
-       }
+       if (AR_SREV_9462(ah))
+               pCap->hw_caps |= ATH9K_HW_WOW_DEVICE_CAPABLE;
 
        if (AR_SREV_9300_20_OR_LATER(ah) &&
            ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
@@ -3048,7 +3042,7 @@ void ath9k_hw_gen_timer_start(struct ath_hw *ah,
 
        timer_next = tsf + trig_timeout;
 
-       ath_dbg(ath9k_hw_common(ah), HWTIMER,
+       ath_dbg(ath9k_hw_common(ah), BTCOEX,
                "current tsf %x period %x timer_next %x\n",
                tsf, timer_period, timer_next);
 
@@ -3147,7 +3141,7 @@ void ath_gen_timer_isr(struct ath_hw *ah)
                index = rightmost_index(timer_table, &thresh_mask);
                timer = timer_table->timers[index];
                BUG_ON(!timer);
-               ath_dbg(common, HWTIMER, "TSF overflow for Gen timer %d\n",
+               ath_dbg(common, BTCOEX, "TSF overflow for Gen timer %d\n",
                        index);
                timer->overflow(timer->arg);
        }
@@ -3156,7 +3150,7 @@ void ath_gen_timer_isr(struct ath_hw *ah)
                index = rightmost_index(timer_table, &trigger_mask);
                timer = timer_table->timers[index];
                BUG_ON(!timer);
-               ath_dbg(common, HWTIMER,
+               ath_dbg(common, BTCOEX,
                        "Gen timer[%d] trigger\n", index);
                timer->trigger(timer->arg);
        }
index ae3034374bc4ca1b064fe54d7a6ce6b5ed43b706..cd74b3afef7db179fc4fcedc23eaa509722da52c 100644 (file)
@@ -246,9 +246,7 @@ enum ath9k_hw_caps {
        ATH9K_HW_CAP_MCI                        = BIT(15),
        ATH9K_HW_CAP_DFS                        = BIT(16),
        ATH9K_HW_WOW_DEVICE_CAPABLE             = BIT(17),
-       ATH9K_HW_WOW_PATTERN_MATCH_EXACT        = BIT(18),
-       ATH9K_HW_WOW_PATTERN_MATCH_DWORD        = BIT(19),
-       ATH9K_HW_CAP_PAPRD                      = BIT(20),
+       ATH9K_HW_CAP_PAPRD                      = BIT(18),
 };
 
 /*
@@ -291,7 +289,6 @@ struct ath9k_ops_config {
        u32 ofdm_trig_high;
        u32 cck_trig_high;
        u32 cck_trig_low;
-       u32 enable_ani;
        u32 enable_paprd;
        int serialize_regmode;
        bool rx_intr_mitigation;
@@ -310,6 +307,10 @@ struct ath9k_ops_config {
        u16 spurchans[AR_EEPROM_MODAL_SPURS][2];
        u8 max_txtrig_level;
        u16 ani_poll_interval; /* ANI poll interval in ms */
+
+       /* Platform specific config */
+       u32 xlna_gpio;
+       bool xatten_margin_cfg;
 };
 
 enum ath9k_int {
@@ -423,7 +424,6 @@ struct ath9k_hw_cal_data {
 
 struct ath9k_channel {
        struct ieee80211_channel *chan;
-       struct ar5416AniState ani;
        u16 channel;
        u32 channelFlags;
        u32 chanmode;
@@ -854,10 +854,10 @@ struct ath_hw {
        u32 globaltxtimeout;
 
        /* ANI */
-       u32 proc_phyerr;
        u32 aniperiod;
        enum ath9k_ani_cmd ani_function;
        u32 ani_skip_count;
+       struct ar5416AniState ani;
 
 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
        struct ath_btcoex_hw btcoex_hw;
@@ -882,9 +882,6 @@ struct ath_hw {
        struct ar5416IniArray iniBank6;
        struct ar5416IniArray iniAddac;
        struct ar5416IniArray iniPcieSerdes;
-#ifdef CONFIG_PM_SLEEP
-       struct ar5416IniArray iniPcieSerdesWow;
-#endif
        struct ar5416IniArray iniPcieSerdesLowPower;
        struct ar5416IniArray iniModesFastClock;
        struct ar5416IniArray iniAdditional;
@@ -895,6 +892,9 @@ struct ath_hw {
        struct ar5416IniArray iniCckfirJapan2484;
        struct ar5416IniArray iniModes_9271_ANI_reg;
        struct ar5416IniArray ini_radio_post_sys2ant;
+       struct ar5416IniArray ini_modes_rxgain_5g_xlna;
+       struct ar5416IniArray ini_modes_rxgain_bb_core;
+       struct ar5416IniArray ini_modes_rxgain_bb_postamble;
 
        struct ar5416IniArray iniMac[ATH_INI_NUM_SPLIT];
        struct ar5416IniArray iniBB[ATH_INI_NUM_SPLIT];
@@ -1165,8 +1165,6 @@ static inline void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
 }
 #endif
 
-
-
 #define ATH9K_CLOCK_RATE_CCK           22
 #define ATH9K_CLOCK_RATE_5GHZ_OFDM     40
 #define ATH9K_CLOCK_RATE_2GHZ_OFDM     44
index 2ba494567777f35ea359a1beb883ecb0be471a74..16f8b201642b71de28ba4920735d0c8a2f5ee8b0 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/ath9k_platform.h>
 #include <linux/module.h>
 #include <linux/relay.h>
+#include <net/ieee80211_radiotap.h>
 
 #include "ath9k.h"
 
@@ -431,6 +432,8 @@ static int ath9k_init_queues(struct ath_softc *sc)
        sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
        ath_cabq_update(sc);
 
+       sc->tx.uapsdq = ath_txq_setup(sc, ATH9K_TX_QUEUE_UAPSD, 0);
+
        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
                sc->tx.txq_map[i]->mac80211_qnum = i;
@@ -510,6 +513,27 @@ static void ath9k_init_misc(struct ath_softc *sc)
        sc->spec_config.fft_period = 0xF;
 }
 
+static void ath9k_init_platform(struct ath_softc *sc)
+{
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_common *common = ath9k_hw_common(ah);
+
+       if (common->bus_ops->ath_bus_type != ATH_PCI)
+               return;
+
+       if (sc->driver_data & (ATH9K_PCI_CUS198 |
+                              ATH9K_PCI_CUS230)) {
+               ah->config.xlna_gpio = 9;
+               ah->config.xatten_margin_cfg = true;
+
+               ath_info(common, "Set parameters for %s\n",
+                        (sc->driver_data & ATH9K_PCI_CUS198) ?
+                        "CUS198" : "CUS230");
+       } else if (sc->driver_data & ATH9K_PCI_CUS217) {
+               ath_info(common, "CUS217 card detected\n");
+       }
+}
+
 static void ath9k_eeprom_request_cb(const struct firmware *eeprom_blob,
                                    void *ctx)
 {
@@ -601,6 +625,11 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
        common->btcoex_enabled = ath9k_btcoex_enable == 1;
        common->disable_ani = false;
 
+       /*
+        * Platform quirks.
+        */
+       ath9k_init_platform(sc);
+
        /*
         * Enable Antenna diversity only when BTCOEX is disabled
         * and the user manually requests the feature.
@@ -613,9 +642,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
        spin_lock_init(&sc->sc_serial_rw);
        spin_lock_init(&sc->sc_pm_lock);
        mutex_init(&sc->mutex);
-#ifdef CONFIG_ATH9K_MAC_DEBUG
-       spin_lock_init(&sc->debug.samp_lock);
-#endif
        tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
        tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
                     (unsigned long)sc);
@@ -755,6 +781,15 @@ static const struct ieee80211_iface_combination if_comb[] = {
        }
 };
 
+#ifdef CONFIG_PM
+static const struct wiphy_wowlan_support ath9k_wowlan_support = {
+       .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
+       .n_patterns = MAX_NUM_USER_PATTERN,
+       .pattern_min_len = 1,
+       .pattern_max_len = MAX_PATTERN_SIZE,
+};
+#endif
+
 void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
 {
        struct ath_hw *ah = sc->sc_ah;
@@ -769,12 +804,19 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
                IEEE80211_HW_REPORTS_TX_ACK_STATUS |
                IEEE80211_HW_SUPPORTS_RC_TABLE;
 
-       if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
-                hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
+       if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
+               hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
+
+               if (AR_SREV_9280_20_OR_LATER(ah))
+                       hw->radiotap_mcs_details |=
+                               IEEE80211_RADIOTAP_MCS_HAVE_STBC;
+       }
 
        if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt)
                hw->flags |= IEEE80211_HW_MFP_CAPABLE;
 
+       hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
+
        hw->wiphy->interface_modes =
                BIT(NL80211_IFTYPE_P2P_GO) |
                BIT(NL80211_IFTYPE_P2P_CLIENT) |
@@ -794,21 +836,13 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
        hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
 
 #ifdef CONFIG_PM_SLEEP
-
        if ((ah->caps.hw_caps & ATH9K_HW_WOW_DEVICE_CAPABLE) &&
-           device_can_wakeup(sc->dev)) {
-
-               hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
-                                         WIPHY_WOWLAN_DISCONNECT;
-               hw->wiphy->wowlan.n_patterns = MAX_NUM_USER_PATTERN;
-               hw->wiphy->wowlan.pattern_min_len = 1;
-               hw->wiphy->wowlan.pattern_max_len = MAX_PATTERN_SIZE;
-
-       }
+           (sc->driver_data & ATH9K_PCI_WOW) &&
+           device_can_wakeup(sc->dev))
+               hw->wiphy->wowlan = &ath9k_wowlan_support;
 
        atomic_set(&sc->wow_sleep_proc_intr, -1);
        atomic_set(&sc->wow_got_bmiss_intr, -1);
-
 #endif
 
        hw->queues = 4;
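
ath9k_init_platform() above keys board-specific tweaks off bit flags carried in sc->driver_data, which the PCI ID table supplies at probe time. A self-contained sketch of that flag-driven quirk pattern; all names and values below are placeholders, not the driver's definitions.

/* Sketch of applying per-board quirks from a driver_data-style bitmask. */
#include <stdbool.h>
#include <stdio.h>

#define QUIRK_CUS198	0x0001
#define QUIRK_CUS230	0x0002
#define QUIRK_WOW	0x0004

struct board_config {
	int xlna_gpio;
	bool xatten_margin_cfg;
	bool wow;
};

static void apply_quirks(struct board_config *cfg, unsigned long flags)
{
	if (flags & (QUIRK_CUS198 | QUIRK_CUS230)) {
		cfg->xlna_gpio = 9;
		cfg->xatten_margin_cfg = true;
	}
	if (flags & QUIRK_WOW)
		cfg->wow = true;
}

int main(void)
{
	struct board_config cfg = { 0 };

	apply_quirks(&cfg, QUIRK_CUS198 | QUIRK_WOW);
	printf("xlna_gpio=%d margin=%d wow=%d\n",
	       cfg.xlna_gpio, cfg.xatten_margin_cfg, cfg.wow);
	return 0;
}
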
index 849259b07370f0966e4b17890e2567e19d9f9cdb..fff5d3ccc66325f351f32b6ab8c96b676592ed31 100644 (file)
@@ -390,9 +390,7 @@ void ath_ani_calibrate(unsigned long data)
        }
 
        /* Verify whether we must check ANI */
-       if (sc->sc_ah->config.enable_ani
-           && (timestamp - common->ani.checkani_timer) >=
-           ah->config.ani_poll_interval) {
+       if ((timestamp - common->ani.checkani_timer) >= ah->config.ani_poll_interval) {
                aniflag = true;
                common->ani.checkani_timer = timestamp;
        }
@@ -418,7 +416,6 @@ void ath_ani_calibrate(unsigned long data)
                longcal ? "long" : "", shortcal ? "short" : "",
                aniflag ? "ani" : "", common->ani.caldone ? "true" : "false");
 
-       ath9k_debug_samp_bb_mac(sc);
        ath9k_ps_restore(sc);
 
 set_timer:
@@ -428,9 +425,7 @@ set_timer:
        * short calibration and long calibration.
        */
        cal_interval = ATH_LONG_CALINTERVAL;
-       if (sc->sc_ah->config.enable_ani)
-               cal_interval = min(cal_interval,
-                                  (u32)ah->config.ani_poll_interval);
+       cal_interval = min(cal_interval, (u32)ah->config.ani_poll_interval);
        if (!common->ani.caldone)
                cal_interval = min(cal_interval, (u32)short_cal_interval);
 
index 566109a40fb3887e6236b3c56d00e8ebcc70354d..2ef05ebffbcf4d7c2167ed46c26aa192060427ed 100644 (file)
@@ -547,6 +547,7 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
 
        rs->rs_status = 0;
        rs->rs_flags = 0;
+       rs->flag = 0;
 
        rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
        rs->rs_tstamp = ads.AR_RcvTimestamp;
@@ -586,10 +587,17 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
        rs->rs_moreaggr =
                (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
        rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
-       rs->rs_flags =
-               (ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
-       rs->rs_flags |=
-               (ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;
+
+       /* directly mapped flags for ieee80211_rx_status */
+       rs->flag |=
+               (ads.ds_rxstatus3 & AR_GI) ? RX_FLAG_SHORT_GI : 0;
+       rs->flag |=
+               (ads.ds_rxstatus3 & AR_2040) ? RX_FLAG_40MHZ : 0;
+       if (AR_SREV_9280_20_OR_LATER(ah))
+               rs->flag |=
+                       (ads.ds_rxstatus3 & AR_STBC) ?
+                               /* only Nss=1 STBC is supported */
+                               (1 << RX_FLAG_STBC_SHIFT) : 0;
 
        if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
                rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
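
The RX hunk above stores mac80211 RX_FLAG_* values directly in the new rs->flag field, so the receive path can OR them into ieee80211_rx_status without a second translation step. An illustrative standalone sketch of such a descriptor-bit to flag mapping; the bit positions and flag values are made up, not the real AR_* or RX_FLAG_* definitions.

/* Sketch of translating hardware descriptor status bits straight into
 * stack-level receive flags.
 */
#include <stdint.h>
#include <stdio.h>

#define DESC_GI		0x00000001	/* short guard interval */
#define DESC_2040	0x00000002	/* 40 MHz reception     */

#define FLAG_SHORT_GI	0x0004
#define FLAG_40MHZ	0x0008

static uint32_t desc_to_rx_flags(uint32_t status)
{
	uint32_t flag = 0;

	flag |= (status & DESC_GI)   ? FLAG_SHORT_GI : 0;
	flag |= (status & DESC_2040) ? FLAG_40MHZ    : 0;
	return flag;
}

int main(void)
{
	printf("flags=0x%x\n", desc_to_rx_flags(DESC_GI | DESC_2040));
	return 0;
}
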
index 5865f92998e191f5d37fc2b78a192b4884308e1b..b02dfce964b4d82f952aaba959e9fcac5fbfe19c 100644 (file)
@@ -149,6 +149,7 @@ struct ath_rx_status {
        u32 evm2;
        u32 evm3;
        u32 evm4;
+       u32 flag; /* see enum mac80211_rx_flags */
 };
 
 struct ath_htc_rx_status {
@@ -533,7 +534,8 @@ struct ar5416_desc {
 #define AR_2040             0x00000002
 #define AR_Parallel40       0x00000004
 #define AR_Parallel40_S     2
-#define AR_RxStatusRsvd30   0x000000f8
+#define AR_STBC             0x00000008 /* on ar9280 and later */
+#define AR_RxStatusRsvd30   0x000000f0
 #define AR_RxAntenna       0xffffff00
 #define AR_RxAntenna_S     8
 
index 5092ecae7706ab7140bcf7d3f88f830be176cc38..1737a3e336859013e2b9aeb6d49d61da1b35a688 100644 (file)
@@ -193,7 +193,6 @@ static bool ath_prepare_reset(struct ath_softc *sc)
        ath_stop_ani(sc);
        del_timer_sync(&sc->rx_poll_timer);
 
-       ath9k_debug_samp_bb_mac(sc);
        ath9k_hw_disable_interrupts(ah);
 
        if (!ath_drain_all_txq(sc))
@@ -1211,13 +1210,6 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
                ath_update_survey_stats(sc);
                spin_unlock_irqrestore(&common->cc_lock, flags);
 
-               /*
-                * Preserve the current channel values, before updating
-                * the same channel
-                */
-               if (ah->curchan && (old_pos == pos))
-                       ath9k_hw_getnf(ah, ah->curchan);
-
                ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
                                          curchan, channel_type);
 
@@ -1273,7 +1265,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
                                curchan->center_freq);
                } else {
                        /* perform spectral scan if requested. */
-                       if (sc->scanning &&
+                       if (test_bit(SC_OP_SCANNING, &sc->sc_flags) &&
                            sc->spectral_mode == SPECTRAL_CHANSCAN)
                                ath9k_spectral_scan_trigger(hw);
                }
@@ -1690,7 +1682,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
        bool flush = false;
        int ret = 0;
 
-       local_bh_disable();
+       mutex_lock(&sc->mutex);
 
        switch (action) {
        case IEEE80211_AMPDU_RX_START:
@@ -1723,7 +1715,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
                ath_err(ath9k_hw_common(sc->sc_ah), "Unknown AMPDU action\n");
        }
 
-       local_bh_enable();
+       mutex_unlock(&sc->mutex);
 
        return ret;
 }
@@ -2007,7 +1999,6 @@ static void ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc)
 {
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
-       struct ath9k_hw_capabilities *pcaps = &ah->caps;
        int pattern_count = 0;
        int i, byte_cnt;
        u8 dis_deauth_pattern[MAX_PATTERN_SIZE];
@@ -2077,36 +2068,9 @@ static void ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc)
 
        /* Create Disassociate pattern mask */
 
-       if (pcaps->hw_caps & ATH9K_HW_WOW_PATTERN_MATCH_EXACT) {
-
-               if (pcaps->hw_caps & ATH9K_HW_WOW_PATTERN_MATCH_DWORD) {
-                       /*
-                        * for AR9280, because of hardware limitation, the
-                        * first 4 bytes have to be matched for all patterns.
-                        * the mask for disassociation and de-auth pattern
-                        * matching need to enable the first 4 bytes.
-                        * also the duration field needs to be filled.
-                        */
-                       dis_deauth_mask[0] = 0xf0;
-
-                       /*
-                        * fill in duration field
-                        FIXME: what is the exact value ?
-                        */
-                       dis_deauth_pattern[2] = 0xff;
-                       dis_deauth_pattern[3] = 0xff;
-               } else {
-                       dis_deauth_mask[0] = 0xfe;
-               }
-
-               dis_deauth_mask[1] = 0x03;
-               dis_deauth_mask[2] = 0xc0;
-       } else {
-               dis_deauth_mask[0] = 0xef;
-               dis_deauth_mask[1] = 0x3f;
-               dis_deauth_mask[2] = 0x00;
-               dis_deauth_mask[3] = 0xfc;
-       }
+       dis_deauth_mask[0] = 0xfe;
+       dis_deauth_mask[1] = 0x03;
+       dis_deauth_mask[2] = 0xc0;
 
        ath_dbg(common, WOW, "Adding disassoc/deauth patterns for WoW\n");
 
@@ -2342,15 +2306,13 @@ static void ath9k_set_wakeup(struct ieee80211_hw *hw, bool enabled)
 static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
 {
        struct ath_softc *sc = hw->priv;
-
-       sc->scanning = 1;
+       set_bit(SC_OP_SCANNING, &sc->sc_flags);
 }
 
 static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
 {
        struct ath_softc *sc = hw->priv;
-
-       sc->scanning = 0;
+       clear_bit(SC_OP_SCANNING, &sc->sc_flags);
 }
 
 struct ieee80211_ops ath9k_ops = {
@@ -2378,6 +2340,7 @@ struct ieee80211_ops ath9k_ops = {
        .flush              = ath9k_flush,
        .tx_frames_pending  = ath9k_tx_frames_pending,
        .tx_last_beacon     = ath9k_tx_last_beacon,
+       .release_buffered_frames = ath9k_release_buffered_frames,
        .get_stats          = ath9k_get_stats,
        .set_antenna        = ath9k_set_antenna,
        .get_antenna        = ath9k_get_antenna,
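
The scan indicator moves from a plain sc->scanning integer to an SC_OP_SCANNING bit manipulated with set_bit()/clear_bit()/test_bit(). A rough userspace analogue of that atomic flag handling using C11 atomics; the flag numbering is arbitrary.

/* Userspace sketch of atomic flag-bit handling. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define OP_SCANNING	0

static atomic_ulong sc_flags;

static void flag_set(int bit)   { atomic_fetch_or(&sc_flags, 1UL << bit); }
static void flag_clear(int bit) { atomic_fetch_and(&sc_flags, ~(1UL << bit)); }
static bool flag_test(int bit)  { return atomic_load(&sc_flags) & (1UL << bit); }

int main(void)
{
	flag_set(OP_SCANNING);
	printf("scanning: %d\n", flag_test(OP_SCANNING));
	flag_clear(OP_SCANNING);
	printf("scanning: %d\n", flag_test(OP_SCANNING));
	return 0;
}
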
index 0e0d39583837219a60a374d69494933b0ffc4e43..c585c9b359733ff377669ec9ac8f76859e492cda 100644 (file)
@@ -34,8 +34,108 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
        { PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI   */
        { PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */
        { PCI_VDEVICE(ATHEROS, 0x0030) }, /* PCI-E  AR9300 */
+
+       /* PCI-E CUS198 */
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0032,
+                        PCI_VENDOR_ID_AZWAVE,
+                        0x2086),
+         .driver_data = ATH9K_PCI_CUS198 },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0032,
+                        PCI_VENDOR_ID_AZWAVE,
+                        0x1237),
+         .driver_data = ATH9K_PCI_CUS198 },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0032,
+                        PCI_VENDOR_ID_AZWAVE,
+                        0x2126),
+         .driver_data = ATH9K_PCI_CUS198 },
+
+       /* PCI-E CUS230 */
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0032,
+                        PCI_VENDOR_ID_AZWAVE,
+                        0x2152),
+         .driver_data = ATH9K_PCI_CUS230 },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0032,
+                        PCI_VENDOR_ID_FOXCONN,
+                        0xE075),
+         .driver_data = ATH9K_PCI_CUS230 },
+
        { PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E  AR9485 */
        { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E  AR9580 */
+
+       /* PCI-E CUS217 */
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0034,
+                        PCI_VENDOR_ID_AZWAVE,
+                        0x2116),
+         .driver_data = ATH9K_PCI_CUS217 },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0034,
+                        0x11AD, /* LITEON */
+                        0x6661),
+         .driver_data = ATH9K_PCI_CUS217 },
+
+       /* AR9462 with WoW support */
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0034,
+                        PCI_VENDOR_ID_ATHEROS,
+                        0x3117),
+         .driver_data = ATH9K_PCI_WOW },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0034,
+                        PCI_VENDOR_ID_LENOVO,
+                        0x3214),
+         .driver_data = ATH9K_PCI_WOW },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0034,
+                        PCI_VENDOR_ID_ATTANSIC,
+                        0x0091),
+         .driver_data = ATH9K_PCI_WOW },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0034,
+                        PCI_VENDOR_ID_AZWAVE,
+                        0x2110),
+         .driver_data = ATH9K_PCI_WOW },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0034,
+                        PCI_VENDOR_ID_ASUSTEK,
+                        0x850E),
+         .driver_data = ATH9K_PCI_WOW },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0034,
+                        0x11AD, /* LITEON */
+                        0x6631),
+         .driver_data = ATH9K_PCI_WOW },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0034,
+                        0x11AD, /* LITEON */
+                        0x6641),
+         .driver_data = ATH9K_PCI_WOW },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0034,
+                        PCI_VENDOR_ID_HP,
+                        0x1864),
+         .driver_data = ATH9K_PCI_WOW },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0034,
+                        0x14CD, /* USI */
+                        0x0063),
+         .driver_data = ATH9K_PCI_WOW },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0034,
+                        0x14CD, /* USI */
+                        0x0064),
+         .driver_data = ATH9K_PCI_WOW },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0034,
+                        0x10CF, /* Fujitsu */
+                        0x1783),
+         .driver_data = ATH9K_PCI_WOW },
+
        { PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E  AR9462 */
        { PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E  AR1111/AR9485 */
        { PCI_VDEVICE(ATHEROS, 0x0036) }, /* PCI-E  AR9565 */
@@ -221,6 +321,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        sc->hw = hw;
        sc->dev = &pdev->dev;
        sc->mem = pcim_iomap_table(pdev)[0];
+       sc->driver_data = id->driver_data;
 
        /* Will be cleared in ath9k_start() */
        set_bit(SC_OP_INVALID, &sc->sc_flags);
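
The expanded PCI ID table above matches on subsystem vendor/device as well, and the subsystem-specific entries are listed before the catch-all { PCI_VDEVICE(ATHEROS, 0x0034) } entry so that the first match wins and its driver_data flags are used. A simplified first-match lookup sketch; the IDs and flag values below are illustrative only.

/* Sketch of first-match device-table lookup with wildcarded subsystem IDs. */
#include <stdio.h>

#define ANY 0xffff
#define TABLE_LEN (sizeof(table) / sizeof(table[0]))

struct id_entry {
	unsigned short vendor, device, sub_vendor, sub_device;
	unsigned long driver_data;
};

static const struct id_entry table[] = {
	{ 0x168c, 0x0034, 0x17aa, 0x3214, 0x1 },	/* specific: WoW flag */
	{ 0x168c, 0x0034, ANY,    ANY,    0x0 },	/* catch-all          */
};

static const struct id_entry *match(unsigned short v, unsigned short d,
				    unsigned short sv, unsigned short sd)
{
	unsigned int i;

	for (i = 0; i < TABLE_LEN; i++) {
		const struct id_entry *e = &table[i];

		if (e->vendor == v && e->device == d &&
		    (e->sub_vendor == ANY || e->sub_vendor == sv) &&
		    (e->sub_device == ANY || e->sub_device == sd))
			return e;
	}
	return NULL;
}

int main(void)
{
	const struct id_entry *e = match(0x168c, 0x0034, 0x17aa, 0x3214);

	printf("driver_data=%#lx\n", e ? e->driver_data : 0UL);
	return 0;
}
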
index 8be2b5d8c155c91c0daafca2e7caa634a89f19e5..865e043e8aa6408c92e2b512ffcc7ba69c4ed39f 100644 (file)
@@ -868,10 +868,7 @@ static int ath9k_process_rate(struct ath_common *common,
        if (rx_stats->rs_rate & 0x80) {
                /* HT rate */
                rxs->flag |= RX_FLAG_HT;
-               if (rx_stats->rs_flags & ATH9K_RX_2040)
-                       rxs->flag |= RX_FLAG_40MHZ;
-               if (rx_stats->rs_flags & ATH9K_RX_GI)
-                       rxs->flag |= RX_FLAG_SHORT_GI;
+               rxs->flag |= rx_stats->flag;
                rxs->rate_idx = rx_stats->rs_rate & 0x7f;
                return 0;
        }
@@ -958,11 +955,11 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
        if (rx_stats->rs_more)
                return 0;
 
-       ath9k_process_rssi(common, hw, hdr, rx_stats);
-
        if (ath9k_process_rate(common, hw, rx_stats, rx_status))
                return -EINVAL;
 
+       ath9k_process_rssi(common, hw, hdr, rx_stats);
+
        rx_status->band = hw->conf.chandef.chan->band;
        rx_status->freq = hw->conf.chandef.chan->center_freq;
        rx_status->signal = ah->noise + rx_stats->rs_rssi;
index f7c90cc58d563428d92940233222bcb9d05ea12c..5af97442ac3745c139d824920e6ec7863ca057ed 100644 (file)
 #define AR_SREV_REVISION_9580_10       4 /* AR9580 1.0 */
 #define AR_SREV_VERSION_9462           0x280
 #define AR_SREV_REVISION_9462_20       2
+#define AR_SREV_REVISION_9462_21       3
 #define AR_SREV_VERSION_9565            0x2C0
 #define AR_SREV_REVISION_9565_10        0
 #define AR_SREV_VERSION_9550           0x400
 
 #define AR_SREV_9462(_ah) \
        (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462))
-
 #define AR_SREV_9462_20(_ah) \
        (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
-       ((_ah)->hw_version.macRev == AR_SREV_REVISION_9462_20))
+        ((_ah)->hw_version.macRev == AR_SREV_REVISION_9462_20))
+#define AR_SREV_9462_21(_ah) \
+       (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
+        ((_ah)->hw_version.macRev == AR_SREV_REVISION_9462_21))
+#define AR_SREV_9462_20_OR_LATER(_ah) \
+       (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
+        ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9462_20))
+#define AR_SREV_9462_21_OR_LATER(_ah) \
+       (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
+        ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9462_21))
 
 #define AR_SREV_9565(_ah) \
        (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565))
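
The new AR_SREV_9462_2x helpers keep the usual shape: the "_OR_LATER" variants still require the same macVersion and only relax the macRev comparison to >=. A tiny standalone restatement of that check, with simplified constants and a made-up struct:

/* Restatement of the revision-check logic with placeholder types. */
#include <stdbool.h>
#include <stdio.h>

#define VERSION_9462	0x280
#define REV_9462_20	2
#define REV_9462_21	3

struct hw_version { unsigned int mac_version, mac_rev; };

static bool is_9462_20_or_later(const struct hw_version *v)
{
	return v->mac_version == VERSION_9462 && v->mac_rev >= REV_9462_20;
}

int main(void)
{
	struct hw_version v21 = { VERSION_9462, REV_9462_21 };
	struct hw_version v20 = { VERSION_9462, REV_9462_20 };

	printf("2.1 chip: %d, 2.0 chip: %d\n",
	       is_9462_20_or_later(&v21), is_9462_20_or_later(&v20));
	return 0;
}
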
index 9f8563091bea436d5973331edac261de73292035..81c88dd606dcd960f43e740b9af8cffcdc25778a 100644 (file)
@@ -34,17 +34,6 @@ const char *ath9k_hw_wow_event_to_string(u32 wow_event)
 }
 EXPORT_SYMBOL(ath9k_hw_wow_event_to_string);
 
-static void ath9k_hw_config_serdes_wow_sleep(struct ath_hw *ah)
-{
-       int i;
-
-       for (i = 0; i < ah->iniPcieSerdesWow.ia_rows; i++)
-               REG_WRITE(ah, INI_RA(&ah->iniPcieSerdesWow, i, 0),
-                         INI_RA(&ah->iniPcieSerdesWow, i, 1));
-
-       usleep_range(1000, 1500);
-}
-
 static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah)
 {
        struct ath_common *common = ath9k_hw_common(ah);
@@ -58,15 +47,8 @@ static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah)
                ath_err(common, "Failed to stop Rx DMA in 10ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
                        REG_READ(ah, AR_CR), REG_READ(ah, AR_DIAG_SW));
                return;
-       } else {
-               if (!AR_SREV_9300_20_OR_LATER(ah))
-                       REG_WRITE(ah, AR_RXDP, 0x0);
        }
 
-       /* AR9280 WoW has sleep issue, do not set it to sleep */
-       if (AR_SREV_9280_20(ah))
-               return;
-
        REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_ON_INT);
 }
 
@@ -84,27 +66,16 @@ static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah)
 
        /* set the transmit buffer */
        ctl[0] = (KAL_FRAME_LEN | (MAX_RATE_POWER << 16));
-
-       if (!(AR_SREV_9300_20_OR_LATER(ah)))
-               ctl[0] += (KAL_ANTENNA_MODE << 25);
-
        ctl[1] = 0;
        ctl[3] = 0xb;   /* OFDM_6M hardware value for this rate */
        ctl[4] = 0;
        ctl[7] = (ah->txchainmask) << 2;
-
-       if (AR_SREV_9300_20_OR_LATER(ah))
-               ctl[2] = 0xf << 16; /* tx_tries 0 */
-       else
-               ctl[2] = 0x7 << 16; /* tx_tries 0 */
-
+       ctl[2] = 0xf << 16; /* tx_tries 0 */
 
        for (i = 0; i < KAL_NUM_DESC_WORDS; i++)
                REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);
 
-       /* for AR9300 family 13 descriptor words */
-       if (AR_SREV_9300_20_OR_LATER(ah))
-               REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);
+       REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);
 
        data_word[0] = (KAL_FRAME_TYPE << 2) | (KAL_FRAME_SUB_TYPE << 4) |
                       (KAL_TO_DS << 8) | (KAL_DURATION_ID << 16);
@@ -183,9 +154,6 @@ void ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern,
 
        ah->wow_event_mask |= BIT(pattern_count + AR_WOW_PAT_FOUND_SHIFT);
 
-       if (!AR_SREV_9285_12_OR_LATER(ah))
-               return;
-
        if (pattern_count < 4) {
                /* Pattern 0-3 uses AR_WOW_LENGTH1 register */
                set = (pattern_len & AR_WOW_LENGTH_MAX) <<
@@ -207,6 +175,7 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
 {
        u32 wow_status = 0;
        u32 val = 0, rval;
+
        /*
         * read the WoW status register to know
         * the wakeup reason
@@ -223,19 +192,14 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
        val &= ah->wow_event_mask;
 
        if (val) {
-
                if (val & AR_WOW_MAGIC_PAT_FOUND)
                        wow_status |= AH_WOW_MAGIC_PATTERN_EN;
-
                if (AR_WOW_PATTERN_FOUND(val))
                        wow_status |= AH_WOW_USER_PATTERN_EN;
-
                if (val & AR_WOW_KEEP_ALIVE_FAIL)
                        wow_status |= AH_WOW_LINK_CHANGE;
-
                if (val & AR_WOW_BEACON_FAIL)
                        wow_status |= AH_WOW_BEACON_MISS;
-
        }
 
        /*
@@ -254,17 +218,6 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
        REG_WRITE(ah, AR_WOW_PATTERN,
                  AR_WOW_CLEAR_EVENTS(REG_READ(ah, AR_WOW_PATTERN)));
 
-       /*
-        * tie reset register for AR9002 family of chipsets
-        * NB: not tieing it back might have some repurcussions.
-        */
-
-       if (!AR_SREV_9300_20_OR_LATER(ah)) {
-               REG_SET_BIT(ah, AR_WA, AR_WA_UNTIE_RESET_EN |
-                           AR_WA_POR_SHORT | AR_WA_RESET_EN);
-       }
-
-
        /*
         * restore the beacon threshold to init value
         */
@@ -277,8 +230,7 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
         * reset to our Chip's Power On Reset so that any PCI-E
         * reset from the bus will not reset our chip
         */
-
-       if (AR_SREV_9280_20_OR_LATER(ah) && ah->is_pciexpress)
+       if (ah->is_pciexpress)
                ath9k_hw_configpcipowersave(ah, false);
 
        ah->wow_event_mask = 0;
@@ -298,7 +250,6 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
         * are from the 'pattern_enable' in this function and
         * 'pattern_count' of ath9k_hw_wow_apply_pattern()
         */
-
        wow_event_mask = ah->wow_event_mask;
 
        /*
@@ -306,50 +257,15 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
         * WOW sleep, we do want the Reset from the PCI-E to disturb
         * our hw state
         */
-
        if (ah->is_pciexpress) {
-
                /*
                 * we need to untie the internal POR (power-on-reset)
                 * to the external PCI-E reset. We also need to tie
                 * the PCI-E Phy reset to the PCI-E reset.
                 */
-
-               if (AR_SREV_9300_20_OR_LATER(ah)) {
-                       set = AR_WA_RESET_EN | AR_WA_POR_SHORT;
-                       clr = AR_WA_UNTIE_RESET_EN | AR_WA_D3_L1_DISABLE;
-                       REG_RMW(ah, AR_WA, set, clr);
-               } else {
-                       if (AR_SREV_9285(ah) || AR_SREV_9287(ah))
-                               set = AR9285_WA_DEFAULT;
-                       else
-                               set = AR9280_WA_DEFAULT;
-
-                       /*
-                        * In AR9280 and AR9285, bit 14 in WA register
-                        * (disable L1) should only be set when device
-                        * enters D3 state and be cleared when device
-                        * comes back to D0
-                        */
-
-                       if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE)
-                               set |= AR_WA_D3_L1_DISABLE;
-
-                       clr = AR_WA_UNTIE_RESET_EN;
-                       set |= AR_WA_RESET_EN | AR_WA_POR_SHORT;
-                       REG_RMW(ah, AR_WA, set, clr);
-
-                       /*
-                        * for WoW sleep, we reprogram the SerDes so that the
-                        * PLL and CLK REQ are both enabled. This uses more
-                        * power but otherwise WoW sleep is unstable and the
-                        * chip may disappear.
-                        */
-
-                       if (AR_SREV_9285_12_OR_LATER(ah))
-                               ath9k_hw_config_serdes_wow_sleep(ah);
-
-               }
+               set = AR_WA_RESET_EN | AR_WA_POR_SHORT;
+               clr = AR_WA_UNTIE_RESET_EN | AR_WA_D3_L1_DISABLE;
+               REG_RMW(ah, AR_WA, set, clr);
        }
 
        /*
@@ -378,7 +294,6 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
         * Program default values for pattern backoff, aifs/slot/KAL count,
         * beacon miss timeout, KAL timeout, etc.
         */
-
        set = AR_WOW_BACK_OFF_SHIFT(AR_WOW_PAT_BACKOFF);
        REG_SET_BIT(ah, AR_WOW_PATTERN, set);
 
@@ -398,7 +313,7 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
        /*
         * Keep alive timo in ms except AR9280
         */
-       if (!pattern_enable || AR_SREV_9280(ah))
+       if (!pattern_enable)
                set = AR_WOW_KEEP_ALIVE_NEVER;
        else
                set = KAL_TIMEOUT * 32;
@@ -420,7 +335,6 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
        /*
         * Configure MAC WoW Registers
         */
-
        set = 0;
        /* Send keep alive timeouts anyway */
        clr = AR_WOW_KEEP_ALIVE_AUTO_DIS;
@@ -430,16 +344,9 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
        else
                set = AR_WOW_KEEP_ALIVE_FAIL_DIS;
 
-       /*
-        * FIXME: For now disable keep alive frame
-        * failure. This seems to sometimes trigger
-        * unnecessary wake up with AR9485 chipsets.
-        */
        set = AR_WOW_KEEP_ALIVE_FAIL_DIS;
-
        REG_RMW(ah, AR_WOW_KEEP_ALIVE, set, clr);
 
-
        /*
         * we are relying on a bmiss failure. ensure we have
         * enough threshold to prevent false positives
@@ -473,14 +380,8 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
        set |= AR_WOW_MAC_INTR_EN;
        REG_RMW(ah, AR_WOW_PATTERN, set, clr);
 
-       /*
-        * For AR9285 and later version of chipsets
-        * enable WoW pattern match for packets less
-        * than 256 bytes for all patterns
-        */
-       if (AR_SREV_9285_12_OR_LATER(ah))
-               REG_WRITE(ah, AR_WOW_PATTERN_MATCH_LT_256B,
-                         AR_WOW_PATTERN_SUPPORTED);
+       REG_WRITE(ah, AR_WOW_PATTERN_MATCH_LT_256B,
+                 AR_WOW_PATTERN_SUPPORTED);
 
        /*
         * Set the power states appropriately and enable PME
@@ -488,43 +389,32 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
        clr = 0;
        set = AR_PMCTRL_PWR_STATE_D1D3 | AR_PMCTRL_HOST_PME_EN |
              AR_PMCTRL_PWR_PM_CTRL_ENA;
-       /*
-        * This is needed for AR9300 chipsets to wake-up
-        * the host.
-        */
-       if (AR_SREV_9300_20_OR_LATER(ah))
-               clr = AR_PCIE_PM_CTRL_ENA;
 
+       clr = AR_PCIE_PM_CTRL_ENA;
        REG_RMW(ah, AR_PCIE_PM_CTRL, set, clr);
 
-       if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
-               /*
-                * this is needed to prevent the chip waking up
-                * the host within 3-4 seconds with certain
-                * platform/BIOS. The fix is to enable
-                * D1 & D3 to match original definition and
-                * also match the OTP value. Anyway this
-                * is more related to SW WOW.
-                */
-               clr = AR_PMCTRL_PWR_STATE_D1D3;
-               REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, clr);
-
-               set = AR_PMCTRL_PWR_STATE_D1D3_REAL;
-               REG_SET_BIT(ah, AR_PCIE_PM_CTRL, set);
-       }
-
+       /*
+        * this is needed to prevent the chip waking up
+        * the host within 3-4 seconds with certain
+        * platform/BIOS. The fix is to enable
+        * D1 & D3 to match original definition and
+        * also match the OTP value. Anyway this
+        * is more related to SW WOW.
+        */
+       clr = AR_PMCTRL_PWR_STATE_D1D3;
+       REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, clr);
 
+       set = AR_PMCTRL_PWR_STATE_D1D3_REAL;
+       REG_SET_BIT(ah, AR_PCIE_PM_CTRL, set);
 
        REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PRESERVE_SEQNUM);
 
-       if (AR_SREV_9300_20_OR_LATER(ah)) {
-               /* to bring down WOW power low margin */
-               set = BIT(13);
-               REG_SET_BIT(ah, AR_PCIE_PHY_REG3, set);
-               /* HW WoW */
-               clr = BIT(5);
-               REG_CLR_BIT(ah, AR_PCU_MISC_MODE3, clr);
-       }
+       /* to bring down WOW power low margin */
+       set = BIT(13);
+       REG_SET_BIT(ah, AR_PCIE_PHY_REG3, set);
+       /* HW WoW */
+       clr = BIT(5);
+       REG_CLR_BIT(ah, AR_PCU_MISC_MODE3, clr);
 
        ath9k_hw_set_powermode_wow_sleep(ah);
        ah->wow_event_mask = wow_event_mask;
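
The WoW paths above rely heavily on REG_RMW(ah, reg, set, clr). As a rough sketch of its semantics (the driver's real helper goes through the ath_hw register ops and may batch accesses), it is a read-modify-write that clears the clr bits and then applies the set bits, e.g. for AR_WA:

        u32 val;

        val = REG_READ(ah, AR_WA);      /* read current value     */
        val &= ~clr;                    /* drop the bits to clear */
        val |= set;                     /* apply the bits to set  */
        REG_WRITE(ah, AR_WA, val);      /* write the result back  */
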
index 83ab6be3fe6d502178b3253943bdd0097f4f8cbe..c59ae43b9b35aeba9356f499a83f73ae02e96a8b 100644
@@ -518,6 +518,10 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
                                !txfail);
                } else {
+                       if (tx_info->flags & IEEE80211_TX_STATUS_EOSP) {
+                               tx_info->flags &= ~IEEE80211_TX_STATUS_EOSP;
+                               ieee80211_sta_eosp(sta);
+                       }
                        /* retry the un-acked ones */
                        if (bf->bf_next == NULL && bf_last->bf_stale) {
                                struct ath_buf *tbf;
@@ -786,25 +790,20 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
        return ndelim;
 }
 
-static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
-                                            struct ath_txq *txq,
-                                            struct ath_atx_tid *tid,
-                                            struct list_head *bf_q,
-                                            int *aggr_len)
+static struct ath_buf *
+ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
+                       struct ath_atx_tid *tid)
 {
-#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
-       struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
-       int rl = 0, nframes = 0, ndelim, prev_al = 0;
-       u16 aggr_limit = 0, al = 0, bpad = 0,
-               al_delta, h_baw = tid->baw_size / 2;
-       enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
-       struct ieee80211_tx_info *tx_info;
        struct ath_frame_info *fi;
        struct sk_buff *skb;
+       struct ath_buf *bf;
        u16 seqno;
 
-       do {
+       while (1) {
                skb = skb_peek(&tid->buf_q);
+               if (!skb)
+                       break;
+
                fi = get_frame_info(skb);
                bf = fi->bf;
                if (!fi->bf)
@@ -820,10 +819,8 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
                seqno = bf->bf_state.seqno;
 
                /* do not step over block-ack window */
-               if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
-                       status = ATH_AGGR_BAW_CLOSED;
+               if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno))
                        break;
-               }
 
                if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
                        struct ath_tx_status ts = {};
@@ -837,6 +834,40 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
                        continue;
                }
 
+               bf->bf_next = NULL;
+               bf->bf_lastbf = bf;
+               return bf;
+       }
+
+       return NULL;
+}
+
+static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
+                                            struct ath_txq *txq,
+                                            struct ath_atx_tid *tid,
+                                            struct list_head *bf_q,
+                                            int *aggr_len)
+{
+#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
+       struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
+       int rl = 0, nframes = 0, ndelim, prev_al = 0;
+       u16 aggr_limit = 0, al = 0, bpad = 0,
+               al_delta, h_baw = tid->baw_size / 2;
+       enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
+       struct ieee80211_tx_info *tx_info;
+       struct ath_frame_info *fi;
+       struct sk_buff *skb;
+
+       do {
+               bf = ath_tx_get_tid_subframe(sc, txq, tid);
+               if (!bf) {
+                       status = ATH_AGGR_BAW_CLOSED;
+                       break;
+               }
+
+               skb = bf->bf_mpdu;
+               fi = get_frame_info(skb);
+
                if (!bf_first)
                        bf_first = bf;
 
@@ -882,7 +913,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
 
                /* link buffers of this frame to the aggregate */
                if (!fi->retries)
-                       ath_tx_addto_baw(sc, tid, seqno);
+                       ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
                bf->bf_state.ndelim = ndelim;
 
                __skb_unlink(skb, &tid->buf_q);
@@ -1090,10 +1121,8 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
                             struct ath_txq *txq, int len)
 {
        struct ath_hw *ah = sc->sc_ah;
-       struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
-       struct ath_buf *bf_first = bf;
+       struct ath_buf *bf_first = NULL;
        struct ath_tx_info info;
-       bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);
 
        memset(&info, 0, sizeof(info));
        info.is_first = true;
@@ -1101,24 +1130,11 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
        info.txpower = MAX_RATE_POWER;
        info.qcu = txq->axq_qnum;
 
-       info.flags = ATH9K_TXDESC_INTREQ;
-       if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
-               info.flags |= ATH9K_TXDESC_NOACK;
-       if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
-               info.flags |= ATH9K_TXDESC_LDPC;
-
-       ath_buf_set_rate(sc, bf, &info, len);
-
-       if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
-               info.flags |= ATH9K_TXDESC_CLRDMASK;
-
-       if (bf->bf_state.bfs_paprd)
-               info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S;
-
-
        while (bf) {
                struct sk_buff *skb = bf->bf_mpdu;
+               struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
                struct ath_frame_info *fi = get_frame_info(skb);
+               bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR);
 
                info.type = get_hw_packet_type(skb);
                if (bf->bf_next)
@@ -1126,6 +1142,26 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
                else
                        info.link = 0;
 
+               if (!bf_first) {
+                       bf_first = bf;
+
+                       info.flags = ATH9K_TXDESC_INTREQ;
+                       if ((tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) ||
+                           txq == sc->tx.uapsdq)
+                               info.flags |= ATH9K_TXDESC_CLRDMASK;
+
+                       if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
+                               info.flags |= ATH9K_TXDESC_NOACK;
+                       if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
+                               info.flags |= ATH9K_TXDESC_LDPC;
+
+                       if (bf->bf_state.bfs_paprd)
+                               info.flags |= (u32) bf->bf_state.bfs_paprd <<
+                                             ATH9K_TXDESC_PAPRD_S;
+
+                       ath_buf_set_rate(sc, bf, &info, len);
+               }
+
                info.buf_addr[0] = bf->bf_buf_addr;
                info.buf_len[0] = skb->len;
                info.pkt_len = fi->framelen;
@@ -1135,7 +1171,7 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
                if (aggr) {
                        if (bf == bf_first)
                                info.aggr = AGGR_BUF_FIRST;
-                       else if (!bf->bf_next)
+                       else if (bf == bf_first->bf_lastbf)
                                info.aggr = AGGR_BUF_LAST;
                        else
                                info.aggr = AGGR_BUF_MIDDLE;
@@ -1144,6 +1180,9 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
                        info.aggr_len = len;
                }
 
+               if (bf == bf_first->bf_lastbf)
+                       bf_first = NULL;
+
                ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
                bf = bf->bf_next;
        }
@@ -1328,6 +1367,70 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
        ath_txq_unlock_complete(sc, txq);
 }
 
+void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
+                                  struct ieee80211_sta *sta,
+                                  u16 tids, int nframes,
+                                  enum ieee80211_frame_release_type reason,
+                                  bool more_data)
+{
+       struct ath_softc *sc = hw->priv;
+       struct ath_node *an = (struct ath_node *)sta->drv_priv;
+       struct ath_txq *txq = sc->tx.uapsdq;
+       struct ieee80211_tx_info *info;
+       struct list_head bf_q;
+       struct ath_buf *bf_tail = NULL, *bf;
+       int sent = 0;
+       int i;
+
+       INIT_LIST_HEAD(&bf_q);
+       for (i = 0; tids && nframes; i++, tids >>= 1) {
+               struct ath_atx_tid *tid;
+
+               if (!(tids & 1))
+                       continue;
+
+               tid = ATH_AN_2_TID(an, i);
+               if (tid->paused)
+                       continue;
+
+               ath_txq_lock(sc, tid->ac->txq);
+               while (!skb_queue_empty(&tid->buf_q) && nframes > 0) {
+                       bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid);
+                       if (!bf)
+                               break;
+
+                       __skb_unlink(bf->bf_mpdu, &tid->buf_q);
+                       list_add_tail(&bf->list, &bf_q);
+                       ath_set_rates(tid->an->vif, tid->an->sta, bf);
+                       ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
+                       bf->bf_state.bf_type &= ~BUF_AGGR;
+                       if (bf_tail)
+                               bf_tail->bf_next = bf;
+
+                       bf_tail = bf;
+                       nframes--;
+                       sent++;
+                       TX_STAT_INC(txq->axq_qnum, a_queued_hw);
+
+                       if (skb_queue_empty(&tid->buf_q))
+                               ieee80211_sta_set_buffered(an->sta, i, false);
+               }
+               ath_txq_unlock_complete(sc, tid->ac->txq);
+       }
+
+       if (list_empty(&bf_q))
+               return;
+
+       info = IEEE80211_SKB_CB(bf_tail->bf_mpdu);
+       info->flags |= IEEE80211_TX_STATUS_EOSP;
+
+       bf = list_first_entry(&bf_q, struct ath_buf, list);
+       ath_txq_lock(sc, txq);
+       ath_tx_fill_desc(sc, bf, txq, 0);
+       ath_tx_txqaddbuf(sc, txq, &bf_q, false);
+       ath_txq_unlock(sc, txq);
+}
+
 /********************/
 /* Queue Management */
 /********************/
@@ -1679,14 +1782,19 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
        }
 
        if (!internal) {
-               txq->axq_depth++;
-               if (bf_is_ampdu_not_probing(bf))
-                       txq->axq_ampdu_depth++;
+               while (bf) {
+                       txq->axq_depth++;
+                       if (bf_is_ampdu_not_probing(bf))
+                               txq->axq_ampdu_depth++;
+
+                       bf = bf->bf_lastbf->bf_next;
+               }
        }
 }
 
-static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
-                             struct sk_buff *skb, struct ath_tx_control *txctl)
+static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_txq *txq,
+                             struct ath_atx_tid *tid, struct sk_buff *skb,
+                             struct ath_tx_control *txctl)
 {
        struct ath_frame_info *fi = get_frame_info(skb);
        struct list_head bf_head;
@@ -1699,21 +1807,22 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
         * - seqno is not within block-ack window
         * - h/w queue depth exceeds low water mark
         */
-       if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
-           !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
-           txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
+       if ((!skb_queue_empty(&tid->buf_q) || tid->paused ||
+            !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
+            txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) &&
+           txq != sc->tx.uapsdq) {
                /*
                 * Add this frame to software queue for scheduling later
                 * for aggregation.
                 */
-               TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
+               TX_STAT_INC(txq->axq_qnum, a_queued_sw);
                __skb_queue_tail(&tid->buf_q, skb);
                if (!txctl->an || !txctl->an->sleeping)
-                       ath_tx_queue_tid(txctl->txq, tid);
+                       ath_tx_queue_tid(txq, tid);
                return;
        }
 
-       bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
+       bf = ath_tx_setup_buffer(sc, txq, tid, skb);
        if (!bf) {
                ieee80211_free_txskb(sc->hw, skb);
                return;
@@ -1728,10 +1837,10 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
        ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
 
        /* Queue to h/w without aggregation */
-       TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
+       TX_STAT_INC(txq->axq_qnum, a_queued_hw);
        bf->bf_lastbf = bf;
-       ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen);
-       ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false);
+       ath_tx_fill_desc(sc, bf, txq, fi->framelen);
+       ath_tx_txqaddbuf(sc, txq, &bf_head, false);
 }
 
 static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
@@ -1869,22 +1978,16 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
        return bf;
 }
 
-/* Upon failure caller should free skb */
-int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
-                struct ath_tx_control *txctl)
+static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
+                         struct ath_tx_control *txctl)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_sta *sta = txctl->sta;
        struct ieee80211_vif *vif = info->control.vif;
        struct ath_softc *sc = hw->priv;
-       struct ath_txq *txq = txctl->txq;
-       struct ath_atx_tid *tid = NULL;
-       struct ath_buf *bf;
-       int padpos, padsize;
        int frmlen = skb->len + FCS_LEN;
-       u8 tidno;
-       int q;
+       int padpos, padsize;
 
        /* NOTE:  sta can be NULL according to net/mac80211.h */
        if (sta)
@@ -1905,6 +2008,11 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
                hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
        }
 
+       if ((vif && vif->type != NL80211_IFTYPE_AP &&
+                   vif->type != NL80211_IFTYPE_AP_VLAN) ||
+           !ieee80211_is_data(hdr->frame_control))
+               info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
+
        /* Add the padding after the header if this is not already done */
        padpos = ieee80211_hdrlen(hdr->frame_control);
        padsize = padpos & 3;
@@ -1914,16 +2022,34 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
 
                skb_push(skb, padsize);
                memmove(skb->data, skb->data + padsize, padpos);
-               hdr = (struct ieee80211_hdr *) skb->data;
        }
 
-       if ((vif && vif->type != NL80211_IFTYPE_AP &&
-                   vif->type != NL80211_IFTYPE_AP_VLAN) ||
-           !ieee80211_is_data(hdr->frame_control))
-               info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
-
        setup_frame_info(hw, sta, skb, frmlen);
+       return 0;
+}
+
+
+/* Upon failure caller should free skb */
+int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
+                struct ath_tx_control *txctl)
+{
+       struct ieee80211_hdr *hdr;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_sta *sta = txctl->sta;
+       struct ieee80211_vif *vif = info->control.vif;
+       struct ath_softc *sc = hw->priv;
+       struct ath_txq *txq = txctl->txq;
+       struct ath_atx_tid *tid = NULL;
+       struct ath_buf *bf;
+       u8 tidno;
+       int q;
+       int ret;
+
+       ret = ath_tx_prepare(hw, skb, txctl);
+       if (ret)
+           return ret;
 
+       hdr = (struct ieee80211_hdr *) skb->data;
        /*
         * At this point, the vif, hw_key and sta pointers in the tx control
         * info are no longer valid (overwritten by the ath_frame_info data.
@@ -1939,6 +2065,12 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
                txq->stopped = true;
        }
 
+       if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) {
+               ath_txq_unlock(sc, txq);
+               txq = sc->tx.uapsdq;
+               ath_txq_lock(sc, txq);
+       }
+
        if (txctl->an && ieee80211_is_data_qos(hdr->frame_control)) {
                tidno = ieee80211_get_qos_ctl(hdr)[0] &
                        IEEE80211_QOS_CTL_TID_MASK;
@@ -1952,11 +2084,11 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
                 * Try aggregation if it's a unicast data frame
                 * and the destination is HT capable.
                 */
-               ath_tx_send_ampdu(sc, tid, skb, txctl);
+               ath_tx_send_ampdu(sc, txq, tid, skb, txctl);
                goto out;
        }
 
-       bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
+       bf = ath_tx_setup_buffer(sc, txq, tid, skb);
        if (!bf) {
                if (txctl->paprd)
                        dev_kfree_skb_any(skb);
@@ -1971,7 +2103,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
                bf->bf_state.bfs_paprd_timestamp = jiffies;
 
        ath_set_rates(vif, sta, bf);
-       ath_tx_send_normal(sc, txctl->txq, tid, skb);
+       ath_tx_send_normal(sc, txq, tid, skb);
 
 out:
        ath_txq_unlock(sc, txq);
@@ -1979,6 +2111,74 @@ out:
        return 0;
 }
 
+void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                struct sk_buff *skb)
+{
+       struct ath_softc *sc = hw->priv;
+       struct ath_tx_control txctl = {
+               .txq = sc->beacon.cabq
+       };
+       struct ath_tx_info info = {};
+       struct ieee80211_hdr *hdr;
+       struct ath_buf *bf_tail = NULL;
+       struct ath_buf *bf;
+       LIST_HEAD(bf_q);
+       int duration = 0;
+       int max_duration;
+
+       max_duration =
+               sc->cur_beacon_conf.beacon_interval * 1000 *
+               sc->cur_beacon_conf.dtim_period / ATH_BCBUF;
+
+       do {
+               struct ath_frame_info *fi = get_frame_info(skb);
+
+               if (ath_tx_prepare(hw, skb, &txctl))
+                       break;
+
+               bf = ath_tx_setup_buffer(sc, txctl.txq, NULL, skb);
+               if (!bf)
+                       break;
+
+               bf->bf_lastbf = bf;
+               ath_set_rates(vif, NULL, bf);
+               ath_buf_set_rate(sc, bf, &info, fi->framelen);
+               duration += info.rates[0].PktDuration;
+               if (bf_tail)
+                       bf_tail->bf_next = bf;
+
+               list_add_tail(&bf->list, &bf_q);
+               bf_tail = bf;
+               skb = NULL;
+
+               if (duration > max_duration)
+                       break;
+
+               skb = ieee80211_get_buffered_bc(hw, vif);
+       } while(skb);
+
+       if (skb)
+               ieee80211_free_txskb(hw, skb);
+
+       if (list_empty(&bf_q))
+               return;
+
+       bf = list_first_entry(&bf_q, struct ath_buf, list);
+       hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data;
+
+       if (hdr->frame_control & IEEE80211_FCTL_MOREDATA) {
+               hdr->frame_control &= ~IEEE80211_FCTL_MOREDATA;
+               dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
+                       sizeof(*hdr), DMA_TO_DEVICE);
+       }
+
+       ath_txq_lock(sc, txctl.txq);
+       ath_tx_fill_desc(sc, bf, txctl.txq, 0);
+       ath_tx_txqaddbuf(sc, txctl.txq, &bf_q, false);
+       TX_STAT_INC(txctl.txq->axq_qnum, queued);
+       ath_txq_unlock(sc, txctl.txq);
+}
+
 /*****************/
 /* TX Completion */
 /*****************/
@@ -2024,7 +2224,12 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
        }
        spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
 
+       __skb_queue_tail(&txq->complete_q, skb);
+
        q = skb_get_queue_mapping(skb);
+       if (txq == sc->tx.uapsdq)
+               txq = sc->tx.txq_map[q];
+
        if (txq == sc->tx.txq_map[q]) {
                if (WARN_ON(--txq->pending_frames < 0))
                        txq->pending_frames = 0;
@@ -2035,8 +2240,6 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
                        txq->stopped = false;
                }
        }
-
-       __skb_queue_tail(&txq->complete_q, skb);
 }
 
 static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
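
Both the aggregation loop and the new ath_tx_get_tid_subframe() helper above gate each frame on BAW_WITHIN(tid->seq_start, tid->baw_size, seqno). For reference, in ath9k that check is a modular sequence-number comparison along these lines (quoted from memory, not from this hunk):

        /* true when seqno falls inside the baw_size-wide window starting at
         * seq_start; 802.11 sequence numbers wrap at 4096 */
        #define BAW_WITHIN(_start, _bawsz, _seqno) \
                ((((_seqno) - (_start)) & 4095) < (_bawsz))
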
index 9dce106cd6d4a1b963910a8310737be18929867a..8596aba34f96854dfe1700da64b44ef832e5fc9a 100644
@@ -133,6 +133,9 @@ struct carl9170_sta_tid {
 
        /* Preaggregation reorder queue */
        struct sk_buff_head queue;
+
+       struct ieee80211_sta *sta;
+       struct ieee80211_vif *vif;
 };
 
 #define CARL9170_QUEUE_TIMEOUT         256
index e9010a481dfdf475d61f567bb0f5f11f2e44dded..4a33c6e39ca28be1089aca1275c6e64edda6666d 100644
@@ -1448,6 +1448,8 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
                tid_info->state = CARL9170_TID_STATE_PROGRESS;
                tid_info->tid = tid;
                tid_info->max = sta_info->ampdu_max_len;
+               tid_info->sta = sta;
+               tid_info->vif = vif;
 
                INIT_LIST_HEAD(&tid_info->list);
                INIT_LIST_HEAD(&tid_info->tmp_list);
@@ -1857,6 +1859,7 @@ void *carl9170_alloc(size_t priv_size)
                     IEEE80211_HW_SUPPORTS_PS |
                     IEEE80211_HW_PS_NULLFUNC_STACK |
                     IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC |
+                    IEEE80211_HW_SUPPORTS_RC_TABLE |
                     IEEE80211_HW_SIGNAL_DBM;
 
        if (!modparam_noht) {
index c61cafa2665b18c5215a953273919a024c5c4473..e3f696ee4d2393b00cbebd38d7f2840c51376149 100644
@@ -625,7 +625,7 @@ static void carl9170_tx_ampdu_timeout(struct ar9170 *ar)
                    msecs_to_jiffies(CARL9170_QUEUE_TIMEOUT)))
                        goto unlock;
 
-               sta = __carl9170_get_tx_sta(ar, skb);
+               sta = iter->sta;
                if (WARN_ON(!sta))
                        goto unlock;
 
@@ -866,6 +866,93 @@ static bool carl9170_tx_cts_check(struct ar9170 *ar,
        return false;
 }
 
+static void carl9170_tx_get_rates(struct ar9170 *ar,
+                                 struct ieee80211_vif *vif,
+                                 struct ieee80211_sta *sta,
+                                 struct sk_buff *skb)
+{
+       struct ieee80211_tx_info *info;
+
+       BUILD_BUG_ON(IEEE80211_TX_MAX_RATES < CARL9170_TX_MAX_RATES);
+       BUILD_BUG_ON(IEEE80211_TX_MAX_RATES > IEEE80211_TX_RATE_TABLE_SIZE);
+
+       info = IEEE80211_SKB_CB(skb);
+
+       ieee80211_get_tx_rates(vif, sta, skb,
+                              info->control.rates,
+                              IEEE80211_TX_MAX_RATES);
+}
+
+static void carl9170_tx_apply_rateset(struct ar9170 *ar,
+                                     struct ieee80211_tx_info *sinfo,
+                                     struct sk_buff *skb)
+{
+       struct ieee80211_tx_rate *txrate;
+       struct ieee80211_tx_info *info;
+       struct _carl9170_tx_superframe *txc = (void *) skb->data;
+       int i;
+       bool ampdu;
+       bool no_ack;
+
+       info = IEEE80211_SKB_CB(skb);
+       ampdu = !!(info->flags & IEEE80211_TX_CTL_AMPDU);
+       no_ack = !!(info->flags & IEEE80211_TX_CTL_NO_ACK);
+
+       /* Set the rate control probe flag for all (sub-) frames.
+        * This is because the TX_STATS_AMPDU flag is only set on
+        * the last frame, so it has to be inherited.
+        */
+       info->flags |= (sinfo->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
+
+       /* NOTE: For the first rate, the ERP & AMPDU flags are directly
+        * taken from mac_control. For all fallback rate, the firmware
+        * updates the mac_control flags from the rate info field.
+        */
+       for (i = 0; i < CARL9170_TX_MAX_RATES; i++) {
+               __le32 phy_set;
+
+               txrate = &sinfo->control.rates[i];
+               if (txrate->idx < 0)
+                       break;
+
+               phy_set = carl9170_tx_physet(ar, info, txrate);
+               if (i == 0) {
+                       __le16 mac_tmp = cpu_to_le16(0);
+
+                       /* first rate - part of the hw's frame header */
+                       txc->f.phy_control = phy_set;
+
+                       if (ampdu && txrate->flags & IEEE80211_TX_RC_MCS)
+                               mac_tmp |= cpu_to_le16(AR9170_TX_MAC_AGGR);
+
+                       if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
+                               mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
+                       else if (carl9170_tx_cts_check(ar, txrate))
+                               mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);
+
+                       txc->f.mac_control |= mac_tmp;
+               } else {
+                       /* fallback rates are stored in the firmware's
+                        * retry rate set array.
+                        */
+                       txc->s.rr[i - 1] = phy_set;
+               }
+
+               SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[i],
+                       txrate->count);
+
+               if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
+                       txc->s.ri[i] |= (AR9170_TX_MAC_PROT_RTS <<
+                               CARL9170_TX_SUPER_RI_ERP_PROT_S);
+               else if (carl9170_tx_cts_check(ar, txrate))
+                       txc->s.ri[i] |= (AR9170_TX_MAC_PROT_CTS <<
+                               CARL9170_TX_SUPER_RI_ERP_PROT_S);
+
+               if (ampdu && (txrate->flags & IEEE80211_TX_RC_MCS))
+                       txc->s.ri[i] |= CARL9170_TX_SUPER_RI_AMPDU;
+       }
+}
+
 static int carl9170_tx_prepare(struct ar9170 *ar,
                               struct ieee80211_sta *sta,
                               struct sk_buff *skb)
@@ -874,13 +961,10 @@ static int carl9170_tx_prepare(struct ar9170 *ar,
        struct _carl9170_tx_superframe *txc;
        struct carl9170_vif_info *cvif;
        struct ieee80211_tx_info *info;
-       struct ieee80211_tx_rate *txrate;
        struct carl9170_tx_info *arinfo;
        unsigned int hw_queue;
-       int i;
        __le16 mac_tmp;
        u16 len;
-       bool ampdu, no_ack;
 
        BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));
        BUILD_BUG_ON(sizeof(struct _carl9170_tx_superdesc) !=
@@ -889,8 +973,6 @@ static int carl9170_tx_prepare(struct ar9170 *ar,
        BUILD_BUG_ON(sizeof(struct _ar9170_tx_hwdesc) !=
                     AR9170_TX_HWDESC_LEN);
 
-       BUILD_BUG_ON(IEEE80211_TX_MAX_RATES < CARL9170_TX_MAX_RATES);
-
        BUILD_BUG_ON(AR9170_MAX_VIRTUAL_MAC >
                ((CARL9170_TX_SUPER_MISC_VIF_ID >>
                 CARL9170_TX_SUPER_MISC_VIF_ID_S) + 1));
@@ -932,8 +1014,7 @@ static int carl9170_tx_prepare(struct ar9170 *ar,
        mac_tmp |= cpu_to_le16((hw_queue << AR9170_TX_MAC_QOS_S) &
                               AR9170_TX_MAC_QOS);
 
-       no_ack = !!(info->flags & IEEE80211_TX_CTL_NO_ACK);
-       if (unlikely(no_ack))
+       if (unlikely(info->flags & IEEE80211_TX_CTL_NO_ACK))
                mac_tmp |= cpu_to_le16(AR9170_TX_MAC_NO_ACK);
 
        if (info->control.hw_key) {
@@ -954,8 +1035,7 @@ static int carl9170_tx_prepare(struct ar9170 *ar,
                }
        }
 
-       ampdu = !!(info->flags & IEEE80211_TX_CTL_AMPDU);
-       if (ampdu) {
+       if (info->flags & IEEE80211_TX_CTL_AMPDU) {
                unsigned int density, factor;
 
                if (unlikely(!sta || !cvif))
@@ -982,50 +1062,6 @@ static int carl9170_tx_prepare(struct ar9170 *ar,
                        txc->s.ampdu_settings, factor);
        }
 
-       /*
-        * NOTE: For the first rate, the ERP & AMPDU flags are directly
-        * taken from mac_control. For all fallback rate, the firmware
-        * updates the mac_control flags from the rate info field.
-        */
-       for (i = 0; i < CARL9170_TX_MAX_RATES; i++) {
-               __le32 phy_set;
-               txrate = &info->control.rates[i];
-               if (txrate->idx < 0)
-                       break;
-
-               phy_set = carl9170_tx_physet(ar, info, txrate);
-               if (i == 0) {
-                       /* first rate - part of the hw's frame header */
-                       txc->f.phy_control = phy_set;
-
-                       if (ampdu && txrate->flags & IEEE80211_TX_RC_MCS)
-                               mac_tmp |= cpu_to_le16(AR9170_TX_MAC_AGGR);
-                       if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
-                               mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
-                       else if (carl9170_tx_cts_check(ar, txrate))
-                               mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);
-
-               } else {
-                       /* fallback rates are stored in the firmware's
-                        * retry rate set array.
-                        */
-                       txc->s.rr[i - 1] = phy_set;
-               }
-
-               SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[i],
-                       txrate->count);
-
-               if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
-                       txc->s.ri[i] |= (AR9170_TX_MAC_PROT_RTS <<
-                               CARL9170_TX_SUPER_RI_ERP_PROT_S);
-               else if (carl9170_tx_cts_check(ar, txrate))
-                       txc->s.ri[i] |= (AR9170_TX_MAC_PROT_CTS <<
-                               CARL9170_TX_SUPER_RI_ERP_PROT_S);
-
-               if (ampdu && (txrate->flags & IEEE80211_TX_RC_MCS))
-                       txc->s.ri[i] |= CARL9170_TX_SUPER_RI_AMPDU;
-       }
-
        txc->s.len = cpu_to_le16(skb->len);
        txc->f.length = cpu_to_le16(len + FCS_LEN);
        txc->f.mac_control = mac_tmp;
@@ -1086,31 +1122,12 @@ static void carl9170_set_ampdu_params(struct ar9170 *ar, struct sk_buff *skb)
        }
 }
 
-static bool carl9170_tx_rate_check(struct ar9170 *ar, struct sk_buff *_dest,
-                                  struct sk_buff *_src)
-{
-       struct _carl9170_tx_superframe *dest, *src;
-
-       dest = (void *) _dest->data;
-       src = (void *) _src->data;
-
-       /*
-        * The mac80211 rate control algorithm expects that all MPDUs in
-        * an AMPDU share the same tx vectors.
-        * This is not really obvious right now, because the hardware
-        * does the AMPDU setup according to its own rulebook.
-        * Our nicely assembled, strictly monotonic increasing mpdu
-        * chains will be broken up, mashed back together...
-        */
-
-       return (dest->f.phy_control == src->f.phy_control);
-}
-
 static void carl9170_tx_ampdu(struct ar9170 *ar)
 {
        struct sk_buff_head agg;
        struct carl9170_sta_tid *tid_info;
        struct sk_buff *skb, *first;
+       struct ieee80211_tx_info *tx_info_first;
        unsigned int i = 0, done_ampdus = 0;
        u16 seq, queue, tmpssn;
 
@@ -1156,6 +1173,7 @@ retry:
                        goto processed;
                }
 
+               tx_info_first = NULL;
                while ((skb = skb_peek(&tid_info->queue))) {
                        /* strict 0, 1, ..., n - 1, n frame sequence order */
                        if (unlikely(carl9170_get_seq(skb) != seq))
@@ -1166,8 +1184,13 @@ retry:
                            (tid_info->max - 1)))
                                break;
 
-                       if (!carl9170_tx_rate_check(ar, skb, first))
-                               break;
+                       if (!tx_info_first) {
+                               carl9170_tx_get_rates(ar, tid_info->vif,
+                                                     tid_info->sta, first);
+                               tx_info_first = IEEE80211_SKB_CB(first);
+                       }
+
+                       carl9170_tx_apply_rateset(ar, tx_info_first, skb);
 
                        atomic_inc(&ar->tx_ampdu_upload);
                        tid_info->snx = seq = SEQ_NEXT(seq);
@@ -1182,8 +1205,7 @@ retry:
                if (skb_queue_empty(&tid_info->queue) ||
                    carl9170_get_seq(skb_peek(&tid_info->queue)) !=
                    tid_info->snx) {
-                       /*
-                        * stop TID, if A-MPDU frames are still missing,
+                       /* stop TID, if A-MPDU frames are still missing,
                         * or whenever the queue is empty.
                         */
 
@@ -1450,12 +1472,14 @@ void carl9170_op_tx(struct ieee80211_hw *hw,
        struct ar9170 *ar = hw->priv;
        struct ieee80211_tx_info *info;
        struct ieee80211_sta *sta = control->sta;
+       struct ieee80211_vif *vif;
        bool run;
 
        if (unlikely(!IS_STARTED(ar)))
                goto err_free;
 
        info = IEEE80211_SKB_CB(skb);
+       vif = info->control.vif;
 
        if (unlikely(carl9170_tx_prepare(ar, sta, skb)))
                goto err_free;
@@ -1486,6 +1510,8 @@ void carl9170_op_tx(struct ieee80211_hw *hw,
        } else {
                unsigned int queue = skb_get_queue_mapping(skb);
 
+               carl9170_tx_get_rates(ar, vif, sta, skb);
+               carl9170_tx_apply_rateset(ar, info, skb);
                skb_queue_tail(&ar->tx_pending[queue], skb);
        }
 
index ccc4c718f124c2a500410cc9ed16f48841e8ec29..7d077c752dd56ca9e3c3d9cb0aae0c951795cadb 100644
@@ -42,11 +42,11 @@ static int __ath_regd_init(struct ath_regulatory *reg);
                                NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_OFDM)
 
 /* We allow IBSS on these on a case by case basis by regulatory domain */
-#define ATH9K_5GHZ_5150_5350   REG_RULE(5150-10, 5350+10, 40, 0, 30,\
+#define ATH9K_5GHZ_5150_5350   REG_RULE(5150-10, 5350+10, 80, 0, 30,\
                                NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS)
-#define ATH9K_5GHZ_5470_5850   REG_RULE(5470-10, 5850+10, 40, 0, 30,\
+#define ATH9K_5GHZ_5470_5850   REG_RULE(5470-10, 5850+10, 80, 0, 30,\
                                NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS)
-#define ATH9K_5GHZ_5725_5850   REG_RULE(5725-10, 5850+10, 40, 0, 30,\
+#define ATH9K_5GHZ_5725_5850   REG_RULE(5725-10, 5850+10, 80, 0, 30,\
                                NL80211_RRF_PASSIVE_SCAN | NL80211_RRF_NO_IBSS)
 
 #define ATH9K_2GHZ_ALL         ATH9K_2GHZ_CH01_11, \
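
The only change to these regulatory rules is the third REG_RULE() argument, the maximum channel bandwidth in MHz, raised from 40 to 80 so that 80 MHz (VHT) channels become permissible in the listed 5 GHz ranges; the frequency bounds, antenna gain, EIRP and flags are untouched. The macro's parameter order is roughly:

        REG_RULE(start_freq_mhz, end_freq_mhz, max_bw_mhz,
                 max_antenna_gain_dbi, max_eirp_dbm, flags)
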
index bac3d98a0cfb21f731e1a9b77c44dd50a67934bb..ce8c0381825e674c2eca80510761ae186283073f 100644
@@ -27,3 +27,15 @@ config WIL6210_ISR_COR
          self-clear when accessed for debug purposes, it makes
          such monitoring impossible.
          Say y unless you debug interrupts
+
+config WIL6210_TRACING
+       bool "wil6210 tracing support"
+       depends on WIL6210
+       depends on EVENT_TRACING
+       default y
+       ---help---
+         Say Y here to enable tracepoints for the wil6210 driver
+         using the kernel tracing infrastructure.  Select this
+         option if you are interested in debugging the driver.
+
+         If unsure, say Y to make it easier to debug problems.
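
The tracepoints are built only when both the driver and the kernel tracing infrastructure are enabled; an illustrative .config fragment:

        CONFIG_EVENT_TRACING=y
        CONFIG_WIL6210=m
        CONFIG_WIL6210_TRACING=y
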
index d288eea0a26a0136bd66b8c8bed4e8a1dd4dd2ee..f891d514d88175ba055588f85a9adf7507195c4e 100644
@@ -1,15 +1,20 @@
 obj-$(CONFIG_WIL6210) += wil6210.o
 
-wil6210-objs := main.o
-wil6210-objs += netdev.o
-wil6210-objs += cfg80211.o
-wil6210-objs += pcie_bus.o
-wil6210-objs += debugfs.o
-wil6210-objs += wmi.o
-wil6210-objs += interrupt.o
-wil6210-objs += txrx.o
+wil6210-y := main.o
+wil6210-y += netdev.o
+wil6210-y += cfg80211.o
+wil6210-y += pcie_bus.o
+wil6210-y += debugfs.o
+wil6210-y += wmi.o
+wil6210-y += interrupt.o
+wil6210-y += txrx.o
+wil6210-y += debug.o
+wil6210-$(CONFIG_WIL6210_TRACING) += trace.o
 
 ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
        subdir-ccflags-y += -Werror
 endif
+# for tracing framework to find trace.h
+CFLAGS_trace.o := -I$(src)
+
 subdir-ccflags-y += -D__CHECK_ENDIAN__
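
The CFLAGS_trace.o := -I$(src) line follows the usual kernel tracepoint recipe: when trace.c defines CREATE_TRACE_POINTS and includes trace.h, define_trace.h re-includes the header by name, so the header's own directory has to be on the include path. A trace header of this kind conventionally ends with boilerplate like the following (the generic pattern, not quoted from this commit's trace.h):

        #undef TRACE_INCLUDE_PATH
        #define TRACE_INCLUDE_PATH .
        #undef TRACE_INCLUDE_FILE
        #define TRACE_INCLUDE_FILE trace
        #include <trace/define_trace.h>
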
index c5d4a87abaaf76eb3e91c167a0905d69cd96edef..61c302a6bdeaa38ef1a1d20b3ac1e477db794745 100644
@@ -322,12 +322,16 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
         * FW don't support scan after connection attempt
         */
        set_bit(wil_status_dontscan, &wil->status);
+       set_bit(wil_status_fwconnecting, &wil->status);
 
        rc = wmi_send(wil, WMI_CONNECT_CMDID, &conn, sizeof(conn));
        if (rc == 0) {
                /* Connect can take lots of time */
                mod_timer(&wil->connect_timer,
                          jiffies + msecs_to_jiffies(2000));
+       } else {
+               clear_bit(wil_status_dontscan, &wil->status);
+               clear_bit(wil_status_fwconnecting, &wil->status);
        }
 
  out:
@@ -398,6 +402,30 @@ static int wil_cfg80211_set_default_key(struct wiphy *wiphy,
        return 0;
 }
 
+static int wil_fix_bcon(struct wil6210_priv *wil,
+                       struct cfg80211_beacon_data *bcon)
+{
+       struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
+       size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
+       int rc = 0;
+
+       if (bcon->probe_resp_len <= hlen)
+               return 0;
+
+       if (!bcon->proberesp_ies) {
+               bcon->proberesp_ies = f->u.probe_resp.variable;
+               bcon->proberesp_ies_len = bcon->probe_resp_len - hlen;
+               rc = 1;
+       }
+       if (!bcon->assocresp_ies) {
+               bcon->assocresp_ies = f->u.probe_resp.variable;
+               bcon->assocresp_ies_len = bcon->probe_resp_len - hlen;
+               rc = 1;
+       }
+
+       return rc;
+}
+
 static int wil_cfg80211_start_ap(struct wiphy *wiphy,
                                 struct net_device *ndev,
                                 struct cfg80211_ap_settings *info)
@@ -419,10 +447,18 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
        print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET,
                             info->ssid, info->ssid_len);
 
+       if (wil_fix_bcon(wil, bcon))
+               wil_dbg_misc(wil, "Fixed bcon\n");
+
        rc = wil_reset(wil);
        if (rc)
                return rc;
 
+       /* Rx VRING. */
+       rc = wil_rx_init(wil);
+       if (rc)
+               return rc;
+
        rc = wmi_set_ssid(wil, info->ssid_len, info->ssid);
        if (rc)
                return rc;
@@ -451,8 +487,6 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
        if (rc)
                return rc;
 
-       /* Rx VRING. After MAC and beacon */
-       rc = wil_rx_init(wil);
 
        netif_carrier_on(ndev);
 
diff --git a/drivers/net/wireless/ath/wil6210/debug.c b/drivers/net/wireless/ath/wil6210/debug.c
new file mode 100644
index 0000000..9eeabf4
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "wil6210.h"
+#include "trace.h"
+
+int wil_err(struct wil6210_priv *wil, const char *fmt, ...)
+{
+       struct net_device *ndev = wil_to_ndev(wil);
+       struct va_format vaf = {
+               .fmt = fmt,
+       };
+       va_list args;
+       int ret;
+
+       va_start(args, fmt);
+       vaf.va = &args;
+       ret = netdev_err(ndev, "%pV", &vaf);
+       trace_wil6210_log_err(&vaf);
+       va_end(args);
+
+       return ret;
+}
+
+int wil_info(struct wil6210_priv *wil, const char *fmt, ...)
+{
+       struct net_device *ndev = wil_to_ndev(wil);
+       struct va_format vaf = {
+               .fmt = fmt,
+       };
+       va_list args;
+       int ret;
+
+       va_start(args, fmt);
+       vaf.va = &args;
+       ret = netdev_info(ndev, "%pV", &vaf);
+       trace_wil6210_log_info(&vaf);
+       va_end(args);
+
+       return ret;
+}
+
+int wil_dbg_trace(struct wil6210_priv *wil, const char *fmt, ...)
+{
+       struct va_format vaf = {
+               .fmt = fmt,
+       };
+       va_list args;
+
+       va_start(args, fmt);
+       vaf.va = &args;
+       trace_wil6210_log_dbg(&vaf);
+       va_end(args);
+
+       return 0;
+}
index 727b1f53e6adac700cc744b5450ffc49979cb6f4..e8308ec309704958b1754c06dbeb7b3fef9f452b 100644
@@ -418,9 +418,15 @@ static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
                if (skb) {
                        unsigned char printbuf[16 * 3 + 2];
                        int i = 0;
-                       int len = skb_headlen(skb);
+                       int len = le16_to_cpu(d->dma.length);
                        void *p = skb->data;
 
+                       if (len != skb_headlen(skb)) {
+                               seq_printf(s, "!!! len: desc = %d skb = %d\n",
+                                          len, skb_headlen(skb));
+                               len = min_t(int, len, skb_headlen(skb));
+                       }
+
                        seq_printf(s, "    len = %d\n", len);
 
                        while (i < len) {
index e3c1e7684f9c6874772484f69e8687d27b8722b7..8205d3e4ab66613134ca4b6dff022a9995eb0e24 100644
@@ -17,6 +17,7 @@
 #include <linux/interrupt.h>
 
 #include "wil6210.h"
+#include "trace.h"
 
 /**
  * Theory of operation:
@@ -103,14 +104,14 @@ static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil)
        clear_bit(wil_status_irqen, &wil->status);
 }
 
-static void wil6210_unmask_irq_tx(struct wil6210_priv *wil)
+void wil6210_unmask_irq_tx(struct wil6210_priv *wil)
 {
        iowrite32(WIL6210_IMC_TX, wil->csr +
                  HOSTADDR(RGF_DMA_EP_TX_ICR) +
                  offsetof(struct RGF_ICR, IMC));
 }
 
-static void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
+void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
 {
        iowrite32(WIL6210_IMC_RX, wil->csr +
                  HOSTADDR(RGF_DMA_EP_RX_ICR) +
@@ -168,6 +169,7 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
                                         HOSTADDR(RGF_DMA_EP_RX_ICR) +
                                         offsetof(struct RGF_ICR, ICR));
 
+       trace_wil6210_irq_rx(isr);
        wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr);
 
        if (!isr) {
@@ -180,13 +182,14 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
        if (isr & BIT_DMA_EP_RX_ICR_RX_DONE) {
                wil_dbg_irq(wil, "RX done\n");
                isr &= ~BIT_DMA_EP_RX_ICR_RX_DONE;
-               wil_rx_handle(wil);
+               wil_dbg_txrx(wil, "NAPI schedule\n");
+               napi_schedule(&wil->napi_rx);
        }
 
        if (isr)
                wil_err(wil, "un-handled RX ISR bits 0x%08x\n", isr);
 
-       wil6210_unmask_irq_rx(wil);
+       /* Rx IRQ will be enabled when NAPI processing finished */
 
        return IRQ_HANDLED;
 }
@@ -198,6 +201,7 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
                                         HOSTADDR(RGF_DMA_EP_TX_ICR) +
                                         offsetof(struct RGF_ICR, ICR));
 
+       trace_wil6210_irq_tx(isr);
        wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr);
 
        if (!isr) {
@@ -208,23 +212,17 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
        wil6210_mask_irq_tx(wil);
 
        if (isr & BIT_DMA_EP_TX_ICR_TX_DONE) {
-               uint i;
                wil_dbg_irq(wil, "TX done\n");
+               napi_schedule(&wil->napi_tx);
                isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE;
-               for (i = 0; i < 24; i++) {
-                       u32 mask = BIT_DMA_EP_TX_ICR_TX_DONE_N(i);
-                       if (isr & mask) {
-                               isr &= ~mask;
-                               wil_dbg_irq(wil, "TX done(%i)\n", i);
-                               wil_tx_complete(wil, i);
-                       }
-               }
+               /* clear also all VRING interrupts */
+               isr &= ~(BIT(25) - 1UL);
        }
 
        if (isr)
                wil_err(wil, "un-handled TX ISR bits 0x%08x\n", isr);
 
-       wil6210_unmask_irq_tx(wil);
+       /* Tx IRQ will be enabled when NAPI processing finished */
 
        return IRQ_HANDLED;
 }
@@ -256,6 +254,7 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
                                         HOSTADDR(RGF_DMA_EP_MISC_ICR) +
                                         offsetof(struct RGF_ICR, ICR));
 
+       trace_wil6210_irq_misc(isr);
        wil_dbg_irq(wil, "ISR MISC 0x%08x\n", isr);
 
        if (!isr) {
@@ -301,6 +300,7 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
        struct wil6210_priv *wil = cookie;
        u32 isr = wil->isr_misc;
 
+       trace_wil6210_irq_misc_thread(isr);
        wil_dbg_irq(wil, "Thread ISR MISC 0x%08x\n", isr);
 
        if (isr & ISR_MISC_FW_ERROR) {
@@ -408,6 +408,7 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie)
        if (wil6210_debug_irq_mask(wil, pseudo_cause))
                return IRQ_NONE;
 
+       trace_wil6210_irq_pseudo(pseudo_cause);
        wil_dbg_irq(wil, "Pseudo IRQ 0x%08x\n", pseudo_cause);
 
        wil6210_mask_irq_pseudo(wil);
index a0478e2f68688ebdcc71f2351e7593efa4f3c12a..0a2844c48a604a6fcf40c3f53e464f645f0d556d 100644
@@ -56,27 +56,21 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
 {
        uint i;
        struct net_device *ndev = wil_to_ndev(wil);
-       struct wireless_dev *wdev = wil->wdev;
 
        wil_dbg_misc(wil, "%s()\n", __func__);
 
        wil_link_off(wil);
-       clear_bit(wil_status_fwconnected, &wil->status);
-
-       switch (wdev->sme_state) {
-       case CFG80211_SME_CONNECTED:
-               cfg80211_disconnected(ndev, WLAN_STATUS_UNSPECIFIED_FAILURE,
+       if (test_bit(wil_status_fwconnected, &wil->status)) {
+               clear_bit(wil_status_fwconnected, &wil->status);
+               cfg80211_disconnected(ndev,
+                                     WLAN_STATUS_UNSPECIFIED_FAILURE,
                                      NULL, 0, GFP_KERNEL);
-               break;
-       case CFG80211_SME_CONNECTING:
+       } else if (test_bit(wil_status_fwconnecting, &wil->status)) {
                cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0,
                                        WLAN_STATUS_UNSPECIFIED_FAILURE,
                                        GFP_KERNEL);
-               break;
-       default:
-               break;
        }
-
+       clear_bit(wil_status_fwconnecting, &wil->status);
        for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++)
                wil_vring_fini_tx(wil, i);
 
@@ -292,41 +286,36 @@ static int __wil_up(struct wil6210_priv *wil)
 {
        struct net_device *ndev = wil_to_ndev(wil);
        struct wireless_dev *wdev = wil->wdev;
-       struct ieee80211_channel *channel = wdev->preset_chandef.chan;
        int rc;
-       int bi;
-       u16 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
 
        rc = wil_reset(wil);
        if (rc)
                return rc;
 
-       /* FIXME Firmware works now in PBSS mode(ToDS=0, FromDS=0) */
-       wmi_nettype = wil_iftype_nl2wmi(NL80211_IFTYPE_ADHOC);
+       /* Rx VRING. After MAC and beacon */
+       rc = wil_rx_init(wil);
+       if (rc)
+               return rc;
+
        switch (wdev->iftype) {
        case NL80211_IFTYPE_STATION:
                wil_dbg_misc(wil, "type: STATION\n");
-               bi = 0;
                ndev->type = ARPHRD_ETHER;
                break;
        case NL80211_IFTYPE_AP:
                wil_dbg_misc(wil, "type: AP\n");
-               bi = 100;
                ndev->type = ARPHRD_ETHER;
                break;
        case NL80211_IFTYPE_P2P_CLIENT:
                wil_dbg_misc(wil, "type: P2P_CLIENT\n");
-               bi = 0;
                ndev->type = ARPHRD_ETHER;
                break;
        case NL80211_IFTYPE_P2P_GO:
                wil_dbg_misc(wil, "type: P2P_GO\n");
-               bi = 100;
                ndev->type = ARPHRD_ETHER;
                break;
        case NL80211_IFTYPE_MONITOR:
                wil_dbg_misc(wil, "type: Monitor\n");
-               bi = 0;
                ndev->type = ARPHRD_IEEE80211_RADIOTAP;
                /* ARPHRD_IEEE80211 or ARPHRD_IEEE80211_RADIOTAP ? */
                break;
@@ -334,36 +323,12 @@ static int __wil_up(struct wil6210_priv *wil)
                return -EOPNOTSUPP;
        }
 
-       /* Apply profile in the following order: */
-       /* SSID and channel for the AP */
-       switch (wdev->iftype) {
-       case NL80211_IFTYPE_AP:
-       case NL80211_IFTYPE_P2P_GO:
-               if (wdev->ssid_len == 0) {
-                       wil_err(wil, "SSID not set\n");
-                       return -EINVAL;
-               }
-               rc = wmi_set_ssid(wil, wdev->ssid_len, wdev->ssid);
-               if (rc)
-                       return rc;
-               break;
-       default:
-               break;
-       }
-
        /* MAC address - pre-requisite for other commands */
        wmi_set_mac_address(wil, ndev->dev_addr);
 
-       /* Set up beaconing if required. */
-       if (bi > 0) {
-               rc = wmi_pcp_start(wil, bi, wmi_nettype,
-                                  (channel ? channel->hw_value : 0));
-               if (rc)
-                       return rc;
-       }
 
-       /* Rx VRING. After MAC and beacon */
-       wil_rx_init(wil);
+       napi_enable(&wil->napi_rx);
+       napi_enable(&wil->napi_tx);
 
        return 0;
 }
@@ -381,6 +346,9 @@ int wil_up(struct wil6210_priv *wil)
 
 static int __wil_down(struct wil6210_priv *wil)
 {
+       napi_disable(&wil->napi_rx);
+       napi_disable(&wil->napi_tx);
+
        if (wil->scan_request) {
                cfg80211_scan_done(wil->scan_request, true);
                wil->scan_request = NULL;
index 098a8ec6b841ac9433ed3d66f99cbeaaa74ba635..29dd1e58cb170b20c36ff215ae64412a74e88266 100644
@@ -40,6 +40,55 @@ static const struct net_device_ops wil_netdev_ops = {
        .ndo_validate_addr      = eth_validate_addr,
 };
 
+static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget)
+{
+       struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
+                                               napi_rx);
+       int quota = budget;
+       int done;
+
+       wil_rx_handle(wil, &quota);
+       done = budget - quota;
+
+       if (done <= 1) { /* burst ends - only one packet processed */
+               napi_complete(napi);
+               wil6210_unmask_irq_rx(wil);
+               wil_dbg_txrx(wil, "NAPI RX complete\n");
+       }
+
+       wil_dbg_txrx(wil, "NAPI RX poll(%d) done %d\n", budget, done);
+
+       return done;
+}
+
+static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
+{
+       struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
+                                               napi_tx);
+       int tx_done = 0;
+       uint i;
+
+       /* always process ALL Tx complete, regardless budget - it is fast */
+       for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
+               struct vring *vring = &wil->vring_tx[i];
+
+               if (!vring->va)
+                       continue;
+
+               tx_done += wil_tx_complete(wil, i);
+       }
+
+       if (tx_done <= 1) { /* burst ends - only one packet processed */
+               napi_complete(napi);
+               wil6210_unmask_irq_tx(wil);
+               wil_dbg_txrx(wil, "NAPI TX complete\n");
+       }
+
+       wil_dbg_txrx(wil, "NAPI TX poll(%d) done %d\n", budget, tx_done);
+
+       return min(tx_done, budget);
+}
+
 void *wil_if_alloc(struct device *dev, void __iomem *csr)
 {
        struct net_device *ndev;
@@ -81,6 +130,11 @@ void *wil_if_alloc(struct device *dev, void __iomem *csr)
        SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
        wdev->netdev = ndev;
 
+       netif_napi_add(ndev, &wil->napi_rx, wil6210_netdev_poll_rx,
+                      WIL6210_NAPI_BUDGET);
+       netif_napi_add(ndev, &wil->napi_tx, wil6210_netdev_poll_tx,
+                      WIL6210_NAPI_BUDGET);
+
        wil_link_off(wil);
 
        return wil;
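
A note on the NAPI conversion above: Rx and Tx completion work moves out of hard-IRQ context into the two poll routines, each bounded by a budget, and the corresponding interrupt is unmasked again once a burst ends (here judged by "at most one packet handled"). The following is a simplified userspace sketch of that budget/quota accounting, not the driver's API; the ring, function and field names are illustrative only.

/* Build: cc -o napi_sketch napi_sketch.c */
#include <stdbool.h>
#include <stdio.h>

struct fake_ring {
	int pending;            /* completed entries waiting to be reaped */
};

/* Reap up to *quota entries, decrementing *quota per entry handled. */
static void ring_reap(struct fake_ring *r, int *quota)
{
	while (*quota > 0 && r->pending > 0) {
		r->pending--;
		(*quota)--;
	}
}

/* Returns entries handled; *reenable_irq is set when the burst has ended. */
static int poll_rx(struct fake_ring *r, int budget, bool *reenable_irq)
{
	int quota = budget;
	int done;

	ring_reap(r, &quota);
	done = budget - quota;

	/* Mirror the driver's heuristic: <= 1 handled means the burst ended. */
	*reenable_irq = (done <= 1);
	return done;
}

int main(void)
{
	struct fake_ring rx = { .pending = 33 };
	bool reenable;

	while (rx.pending) {
		int done = poll_rx(&rx, 16, &reenable);

		printf("poll: done %d -> %s\n", done,
		       reenable ? "complete + unmask IRQ" : "poll again");
	}
	return 0;
}
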
diff --git a/drivers/net/wireless/ath/wil6210/trace.c b/drivers/net/wireless/ath/wil6210/trace.c
new file mode 100644 (file)
index 0000000..cd2534b
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/net/wireless/ath/wil6210/trace.h b/drivers/net/wireless/ath/wil6210/trace.h
new file mode 100644 (file)
index 0000000..eff1239
--- /dev/null
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM wil6210
+#if !defined(WIL6210_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define WIL6210_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "wil6210.h"
+#include "txrx.h"
+
+/* create empty functions when tracing is disabled */
+#if !defined(CONFIG_WIL6210_TRACING) || defined(__CHECKER__)
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(...)
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(evt_class, name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#endif /* !CONFIG_WIL6210_TRACING || defined(__CHECKER__) */
+
+DECLARE_EVENT_CLASS(wil6210_wmi,
+       TP_PROTO(u16 id, void *buf, u16 buf_len),
+
+       TP_ARGS(id, buf, buf_len),
+
+       TP_STRUCT__entry(
+               __field(u16, id)
+               __field(u16, buf_len)
+               __dynamic_array(u8, buf, buf_len)
+       ),
+
+       TP_fast_assign(
+               __entry->id = id;
+               __entry->buf_len = buf_len;
+               memcpy(__get_dynamic_array(buf), buf, buf_len);
+       ),
+
+       TP_printk(
+               "id 0x%04x len %d",
+               __entry->id, __entry->buf_len
+       )
+);
+
+DEFINE_EVENT(wil6210_wmi, wil6210_wmi_cmd,
+       TP_PROTO(u16 id, void *buf, u16 buf_len),
+       TP_ARGS(id, buf, buf_len)
+);
+
+DEFINE_EVENT(wil6210_wmi, wil6210_wmi_event,
+       TP_PROTO(u16 id, void *buf, u16 buf_len),
+       TP_ARGS(id, buf, buf_len)
+);
+
+#define WIL6210_MSG_MAX (200)
+
+DECLARE_EVENT_CLASS(wil6210_log_event,
+       TP_PROTO(struct va_format *vaf),
+       TP_ARGS(vaf),
+       TP_STRUCT__entry(
+               __dynamic_array(char, msg, WIL6210_MSG_MAX)
+       ),
+       TP_fast_assign(
+               WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+                                      WIL6210_MSG_MAX,
+                                      vaf->fmt,
+                                      *vaf->va) >= WIL6210_MSG_MAX);
+       ),
+       TP_printk("%s", __get_str(msg))
+);
+
+DEFINE_EVENT(wil6210_log_event, wil6210_log_err,
+       TP_PROTO(struct va_format *vaf),
+       TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(wil6210_log_event, wil6210_log_info,
+       TP_PROTO(struct va_format *vaf),
+       TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(wil6210_log_event, wil6210_log_dbg,
+       TP_PROTO(struct va_format *vaf),
+       TP_ARGS(vaf)
+);
+
+#define wil_pseudo_irq_cause(x) __print_flags(x, "|",  \
+       {BIT_DMA_PSEUDO_CAUSE_RX,       "Rx" },         \
+       {BIT_DMA_PSEUDO_CAUSE_TX,       "Tx" },         \
+       {BIT_DMA_PSEUDO_CAUSE_MISC,     "Misc" })
+
+TRACE_EVENT(wil6210_irq_pseudo,
+       TP_PROTO(u32 x),
+       TP_ARGS(x),
+       TP_STRUCT__entry(
+               __field(u32, x)
+       ),
+       TP_fast_assign(
+               __entry->x = x;
+       ),
+       TP_printk("cause 0x%08x : %s", __entry->x,
+                 wil_pseudo_irq_cause(__entry->x))
+);
+
+DECLARE_EVENT_CLASS(wil6210_irq,
+       TP_PROTO(u32 x),
+       TP_ARGS(x),
+       TP_STRUCT__entry(
+               __field(u32, x)
+       ),
+       TP_fast_assign(
+               __entry->x = x;
+       ),
+       TP_printk("cause 0x%08x", __entry->x)
+);
+
+DEFINE_EVENT(wil6210_irq, wil6210_irq_rx,
+       TP_PROTO(u32 x),
+       TP_ARGS(x)
+);
+
+DEFINE_EVENT(wil6210_irq, wil6210_irq_tx,
+       TP_PROTO(u32 x),
+       TP_ARGS(x)
+);
+
+DEFINE_EVENT(wil6210_irq, wil6210_irq_misc,
+       TP_PROTO(u32 x),
+       TP_ARGS(x)
+);
+
+DEFINE_EVENT(wil6210_irq, wil6210_irq_misc_thread,
+       TP_PROTO(u32 x),
+       TP_ARGS(x)
+);
+
+TRACE_EVENT(wil6210_rx,
+       TP_PROTO(u16 index, struct vring_rx_desc *d),
+       TP_ARGS(index, d),
+       TP_STRUCT__entry(
+               __field(u16, index)
+               __field(unsigned int, len)
+               __field(u8, mid)
+               __field(u8, cid)
+               __field(u8, tid)
+               __field(u8, type)
+               __field(u8, subtype)
+               __field(u16, seq)
+               __field(u8, mcs)
+       ),
+       TP_fast_assign(
+               __entry->index = index;
+               __entry->len = d->dma.length;
+               __entry->mid = wil_rxdesc_mid(d);
+               __entry->cid = wil_rxdesc_cid(d);
+               __entry->tid = wil_rxdesc_tid(d);
+               __entry->type = wil_rxdesc_ftype(d);
+               __entry->subtype = wil_rxdesc_subtype(d);
+               __entry->seq = wil_rxdesc_seq(d);
+               __entry->mcs = wil_rxdesc_mcs(d);
+       ),
+       TP_printk("index %d len %d mid %d cid %d tid %d mcs %d seq 0x%03x"
+                 " type 0x%1x subtype 0x%1x", __entry->index, __entry->len,
+                 __entry->mid, __entry->cid, __entry->tid, __entry->mcs,
+                 __entry->seq, __entry->type, __entry->subtype)
+);
+
+TRACE_EVENT(wil6210_tx,
+       TP_PROTO(u8 vring, u16 index, unsigned int len, u8 frags),
+       TP_ARGS(vring, index, len, frags),
+       TP_STRUCT__entry(
+               __field(u8, vring)
+               __field(u8, frags)
+               __field(u16, index)
+               __field(unsigned int, len)
+       ),
+       TP_fast_assign(
+               __entry->vring = vring;
+               __entry->frags = frags;
+               __entry->index = index;
+               __entry->len = len;
+       ),
+       TP_printk("vring %d index %d len %d frags %d",
+                 __entry->vring, __entry->index, __entry->len, __entry->frags)
+);
+
+TRACE_EVENT(wil6210_tx_done,
+       TP_PROTO(u8 vring, u16 index, unsigned int len, u8 err),
+       TP_ARGS(vring, index, len, err),
+       TP_STRUCT__entry(
+               __field(u8, vring)
+               __field(u8, err)
+               __field(u16, index)
+               __field(unsigned int, len)
+       ),
+       TP_fast_assign(
+               __entry->vring = vring;
+               __entry->index = index;
+               __entry->len = len;
+               __entry->err = err;
+       ),
+       TP_printk("vring %d index %d len %d err 0x%02x",
+                 __entry->vring, __entry->index, __entry->len,
+                 __entry->err)
+);
+
+#endif /* WIL6210_TRACE_H || TRACE_HEADER_MULTI_READ */
+
+#if defined(CONFIG_WIL6210_TRACING) && !defined(__CHECKER__)
+/* we don't want to use include/trace/events */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
+#endif /* defined(CONFIG_WIL6210_TRACING) && !defined(__CHECKER__) */
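
The new trace.h follows the usual ftrace header layout: real TRACE_EVENT/DECLARE_EVENT_CLASS definitions when CONFIG_WIL6210_TRACING is set, and empty inline stubs otherwise, so call sites never need an #ifdef. Below is a minimal userspace illustration of that compile-out pattern; it is not the kernel tracepoint machinery, and MYDRV_TRACING / MY_TRACE_EVENT are made-up stand-ins.

/* Build: cc -DMYDRV_TRACING -o trace_sketch trace_sketch.c (omit -D for stubs) */
#include <stdio.h>

#ifdef MYDRV_TRACING
#define MY_TRACE_EVENT(name, proto, ...) \
	static inline void trace_##name(proto) { __VA_ARGS__ }
#else
#define MY_TRACE_EVENT(name, proto, ...) \
	static inline void trace_##name(proto) { }
#endif

/* One definition; what it expands to depends only on the build switch. */
MY_TRACE_EVENT(rx_packet, unsigned int len,
	       printf("trace: rx packet len %u\n", len);)

int main(void)
{
	/* The call site is identical whether tracing is compiled in or out. */
	trace_rx_packet(1500);
	return 0;
}
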
index 797024507c712109e427aed881dcc48444ae3f4c..d240b24e1ccfafdd7d809c4b45951cbe2747a31a 100644 (file)
@@ -22,6 +22,7 @@
 #include "wil6210.h"
 #include "wmi.h"
 #include "txrx.h"
+#include "trace.h"
 
 static bool rtap_include_phy_info;
 module_param(rtap_include_phy_info, bool, S_IRUGO);
@@ -89,8 +90,8 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
         * we can use any
         */
        for (i = 0; i < vring->size; i++) {
-               volatile struct vring_tx_desc *d = &(vring->va[i].tx);
-               d->dma.status = TX_DMA_STATUS_DU;
+               volatile struct vring_tx_desc *_d = &(vring->va[i].tx);
+               _d->dma.status = TX_DMA_STATUS_DU;
        }
 
        wil_dbg_misc(wil, "vring[%d] 0x%p:0x%016llx 0x%p\n", vring->size,
@@ -106,30 +107,39 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
        size_t sz = vring->size * sizeof(vring->va[0]);
 
        while (!wil_vring_is_empty(vring)) {
+               dma_addr_t pa;
+               struct sk_buff *skb;
+               u16 dmalen;
+
                if (tx) {
-                       volatile struct vring_tx_desc *d =
+                       struct vring_tx_desc dd, *d = &dd;
+                       volatile struct vring_tx_desc *_d =
                                        &vring->va[vring->swtail].tx;
-                       dma_addr_t pa = d->dma.addr_low |
-                                       ((u64)d->dma.addr_high << 32);
-                       struct sk_buff *skb = vring->ctx[vring->swtail];
+
+                       *d = *_d;
+                       pa = wil_desc_addr(&d->dma.addr);
+                       dmalen = le16_to_cpu(d->dma.length);
+                       skb = vring->ctx[vring->swtail];
                        if (skb) {
-                               dma_unmap_single(dev, pa, d->dma.length,
+                               dma_unmap_single(dev, pa, dmalen,
                                                 DMA_TO_DEVICE);
                                dev_kfree_skb_any(skb);
                                vring->ctx[vring->swtail] = NULL;
                        } else {
-                               dma_unmap_page(dev, pa, d->dma.length,
+                               dma_unmap_page(dev, pa, dmalen,
                                               DMA_TO_DEVICE);
                        }
                        vring->swtail = wil_vring_next_tail(vring);
                } else { /* rx */
-                       volatile struct vring_rx_desc *d =
+                       struct vring_rx_desc dd, *d = &dd;
+                       volatile struct vring_rx_desc *_d =
                                        &vring->va[vring->swtail].rx;
-                       dma_addr_t pa = d->dma.addr_low |
-                                       ((u64)d->dma.addr_high << 32);
-                       struct sk_buff *skb = vring->ctx[vring->swhead];
-                       dma_unmap_single(dev, pa, d->dma.length,
-                                        DMA_FROM_DEVICE);
+
+                       *d = *_d;
+                       pa = wil_desc_addr(&d->dma.addr);
+                       dmalen = le16_to_cpu(d->dma.length);
+                       skb = vring->ctx[vring->swhead];
+                       dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
                        kfree_skb(skb);
                        wil_vring_advance_head(vring, 1);
                }
@@ -151,7 +161,8 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
 {
        struct device *dev = wil_to_dev(wil);
        unsigned int sz = RX_BUF_LEN;
-       volatile struct vring_rx_desc *d = &(vring->va[i].rx);
+       struct vring_rx_desc dd, *d = &dd;
+       volatile struct vring_rx_desc *_d = &(vring->va[i].rx);
        dma_addr_t pa;
 
        /* TODO align */
@@ -169,13 +180,13 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
        }
 
        d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
-       d->dma.addr_low = lower_32_bits(pa);
-       d->dma.addr_high = (u16)upper_32_bits(pa);
+       wil_desc_addr_set(&d->dma.addr, pa);
        /* ip_length don't care */
        /* b11 don't care */
        /* error don't care */
        d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
-       d->dma.length = sz;
+       d->dma.length = cpu_to_le16(sz);
+       *_d = *d;
        vring->ctx[i] = skb;
 
        return 0;
@@ -321,11 +332,12 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
 {
        struct device *dev = wil_to_dev(wil);
        struct net_device *ndev = wil_to_ndev(wil);
-       volatile struct vring_rx_desc *d;
-       struct vring_rx_desc *d1;
+       volatile struct vring_rx_desc *_d;
+       struct vring_rx_desc *d;
        struct sk_buff *skb;
        dma_addr_t pa;
        unsigned int sz = RX_BUF_LEN;
+       u16 dmalen;
        u8 ftype;
        u8 ds_bits;
 
@@ -334,32 +346,44 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
        if (wil_vring_is_empty(vring))
                return NULL;
 
-       d = &(vring->va[vring->swhead].rx);
-       if (!(d->dma.status & RX_DMA_STATUS_DU)) {
+       _d = &(vring->va[vring->swhead].rx);
+       if (!(_d->dma.status & RX_DMA_STATUS_DU)) {
                /* it is not an error, we just reached the end of the Rx done area */
                return NULL;
        }
 
-       pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32);
        skb = vring->ctx[vring->swhead];
+       d = wil_skb_rxdesc(skb);
+       *d = *_d;
+       pa = wil_desc_addr(&d->dma.addr);
+       vring->ctx[vring->swhead] = NULL;
+       wil_vring_advance_head(vring, 1);
+
        dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
-       skb_trim(skb, d->dma.length);
+       dmalen = le16_to_cpu(d->dma.length);
+
+       trace_wil6210_rx(vring->swhead, d);
+       wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", vring->swhead, dmalen);
+       wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4,
+                         (const void *)d, sizeof(*d), false);
+
+       if (dmalen > sz) {
+               wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
+               kfree_skb(skb);
+               return NULL;
+       }
+       skb_trim(skb, dmalen);
+
+       wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
+                         skb->data, skb_headlen(skb), false);
 
-       d1 = wil_skb_rxdesc(skb);
-       *d1 = *d;
 
-       wil->stats.last_mcs_rx = wil_rxdesc_mcs(d1);
+       wil->stats.last_mcs_rx = wil_rxdesc_mcs(d);
 
        /* use radiotap header only if required */
        if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
                wil_rx_add_radiotap_header(wil, skb);
 
-       wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", vring->swhead, d->dma.length);
-       wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4,
-                         (const void *)d, sizeof(*d), false);
-
-       wil_vring_advance_head(vring, 1);
-
        /* no extra checks if in sniffer mode */
        if (ndev->type != ARPHRD_ETHER)
                return skb;
@@ -368,7 +392,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
         * Driver should recognize it by frame type, that is found
         * in Rx descriptor. If type is not data, it is 802.11 frame as is
         */
-       ftype = wil_rxdesc_ftype(d1) << 2;
+       ftype = wil_rxdesc_ftype(d) << 2;
        if (ftype != IEEE80211_FTYPE_DATA) {
                wil_dbg_txrx(wil, "Non-data frame ftype 0x%08x\n", ftype);
                /* TODO: process it */
@@ -383,7 +407,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
                return NULL;
        }
 
-       ds_bits = wil_rxdesc_ds_bits(d1);
+       ds_bits = wil_rxdesc_ds_bits(d);
        if (ds_bits == 1) {
                /*
                 * HW bug - in ToDS mode, i.e. Rx on AP side,
@@ -425,6 +449,7 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count)
 
 /*
  * Pass Rx packet to the netif. Update statistics.
+ * Called in softirq context (NAPI poll).
  */
 static void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
 {
@@ -433,10 +458,7 @@ static void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
 
        skb_orphan(skb);
 
-       if (in_interrupt())
-               rc = netif_rx(skb);
-       else
-               rc = netif_rx_ni(skb);
+       rc = netif_receive_skb(skb);
 
        if (likely(rc == NET_RX_SUCCESS)) {
                ndev->stats.rx_packets++;
@@ -450,9 +472,9 @@ static void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
 /**
  * Process all completed skb's from Rx VRING
  *
- * Safe to call from IRQ
+ * Safe to call from NAPI poll, i.e. softirq with interrupts enabled
  */
-void wil_rx_handle(struct wil6210_priv *wil)
+void wil_rx_handle(struct wil6210_priv *wil, int *quota)
 {
        struct net_device *ndev = wil_to_ndev(wil);
        struct vring *v = &wil->vring_rx;
@@ -463,9 +485,8 @@ void wil_rx_handle(struct wil6210_priv *wil)
                return;
        }
        wil_dbg_txrx(wil, "%s()\n", __func__);
-       while (NULL != (skb = wil_vring_reap_rx(wil, v))) {
-               wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
-                                 skb->data, skb_headlen(skb), false);
+       while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) {
+               (*quota)--;
 
                if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
                        skb->dev = ndev;
@@ -600,18 +621,17 @@ static struct vring *wil_find_tx_vring(struct wil6210_priv *wil,
        return NULL;
 }
 
-static int wil_tx_desc_map(volatile struct vring_tx_desc *d,
-                          dma_addr_t pa, u32 len)
+static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
+                          int vring_index)
 {
-       d->dma.addr_low = lower_32_bits(pa);
-       d->dma.addr_high = (u16)upper_32_bits(pa);
+       wil_desc_addr_set(&d->dma.addr, pa);
        d->dma.ip_length = 0;
        /* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
        d->dma.b11 = 0/*14 | BIT(7)*/;
        d->dma.error = 0;
        d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
-       d->dma.length = len;
-       d->dma.d0 = 0;
+       d->dma.length = cpu_to_le16((u16)len);
+       d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
        d->mac.d[0] = 0;
        d->mac.d[1] = 0;
        d->mac.d[2] = 0;
@@ -630,7 +650,8 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
                        struct sk_buff *skb)
 {
        struct device *dev = wil_to_dev(wil);
-       volatile struct vring_tx_desc *d;
+       struct vring_tx_desc dd, *d = &dd;
+       volatile struct vring_tx_desc *_d;
        u32 swhead = vring->swhead;
        int avail = wil_vring_avail_tx(vring);
        int nr_frags = skb_shinfo(skb)->nr_frags;
@@ -648,7 +669,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
                        1 + nr_frags);
                return -ENOMEM;
        }
-       d = &(vring->va[i].tx);
+       _d = &(vring->va[i].tx);
 
        /* FIXME FW can accept only unicast frames for the peer */
        memcpy(skb->data, wil->dst_addr[vring_index], ETH_ALEN);
@@ -664,28 +685,32 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
        if (unlikely(dma_mapping_error(dev, pa)))
                return -EINVAL;
        /* 1-st segment */
-       wil_tx_desc_map(d, pa, skb_headlen(skb));
+       wil_tx_desc_map(d, pa, skb_headlen(skb), vring_index);
        d->mac.d[2] |= ((nr_frags + 1) <<
                       MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
+       if (nr_frags)
+               *_d = *d;
+
        /* middle segments */
        for (f = 0; f < nr_frags; f++) {
                const struct skb_frag_struct *frag =
                                &skb_shinfo(skb)->frags[f];
                int len = skb_frag_size(frag);
                i = (swhead + f + 1) % vring->size;
-               d = &(vring->va[i].tx);
+               _d = &(vring->va[i].tx);
                pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
                                DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(dev, pa)))
                        goto dma_error;
-               wil_tx_desc_map(d, pa, len);
+               wil_tx_desc_map(d, pa, len, vring_index);
                vring->ctx[i] = NULL;
+               *_d = *d;
        }
        /* for the last seg only */
        d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
-       d->dma.d0 |= BIT(9); /* BUG: undocumented bit */
+       d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS);
        d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
-       d->dma.d0 |= (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
+       *_d = *d;
 
        wil_hex_dump_txrx("Tx ", DUMP_PREFIX_NONE, 32, 4,
                          (const void *)d, sizeof(*d), false);
@@ -693,6 +718,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
        /* advance swhead */
        wil_vring_advance_head(vring, nr_frags + 1);
        wil_dbg_txrx(wil, "Tx swhead %d -> %d\n", swhead, vring->swhead);
+       trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);
        iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail));
        /* hold reference to skb
         * to prevent skb release before accounting
@@ -705,14 +731,18 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
        /* unmap what we have mapped */
        /* Note: increment @f to operate with positive index */
        for (f++; f > 0; f--) {
+               u16 dmalen;
+
                i = (swhead + f) % vring->size;
-               d = &(vring->va[i].tx);
-               d->dma.status = TX_DMA_STATUS_DU;
-               pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32);
+               _d = &(vring->va[i].tx);
+               *d = *_d;
+               _d->dma.status = TX_DMA_STATUS_DU;
+               pa = wil_desc_addr(&d->dma.addr);
+               dmalen = le16_to_cpu(d->dma.length);
                if (vring->ctx[i])
-                       dma_unmap_single(dev, pa, d->dma.length, DMA_TO_DEVICE);
+                       dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
                else
-                       dma_unmap_page(dev, pa, d->dma.length, DMA_TO_DEVICE);
+                       dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
        }
 
        return -EINVAL;
@@ -738,18 +768,16 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                wil_err(wil, "Xmit in monitor mode not supported\n");
                goto drop;
        }
-       if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
-               rc = wmi_tx_eapol(wil, skb);
-       } else {
-               /* find vring */
-               vring = wil_find_tx_vring(wil, skb);
-               if (!vring) {
-                       wil_err(wil, "No Tx VRING available\n");
-                       goto drop;
-               }
-               /* set up vring entry */
-               rc = wil_tx_vring(wil, vring, skb);
+
+       /* find vring */
+       vring = wil_find_tx_vring(wil, skb);
+       if (!vring) {
+               wil_err(wil, "No Tx VRING available\n");
+               goto drop;
        }
+       /* set up vring entry */
+       rc = wil_tx_vring(wil, vring, skb);
+
        switch (rc) {
        case 0:
                /* statistics will be updated on the tx_complete */
@@ -761,7 +789,6 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                break; /* goto drop; */
        }
  drop:
-       netif_tx_stop_all_queues(ndev);
        ndev->stats.tx_dropped++;
        dev_kfree_skb_any(skb);
 
@@ -771,41 +798,48 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 /**
  * Clean up transmitted skb's from the Tx VRING
  *
+ * Return number of descriptors cleared
+ *
  * Safe to call from IRQ
  */
-void wil_tx_complete(struct wil6210_priv *wil, int ringid)
+int wil_tx_complete(struct wil6210_priv *wil, int ringid)
 {
        struct net_device *ndev = wil_to_ndev(wil);
        struct device *dev = wil_to_dev(wil);
        struct vring *vring = &wil->vring_tx[ringid];
+       int done = 0;
 
        if (!vring->va) {
                wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
-               return;
+               return 0;
        }
 
        wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid);
 
        while (!wil_vring_is_empty(vring)) {
-               volatile struct vring_tx_desc *d1 =
+               volatile struct vring_tx_desc *_d =
                                              &vring->va[vring->swtail].tx;
                struct vring_tx_desc dd, *d = &dd;
                dma_addr_t pa;
                struct sk_buff *skb;
+               u16 dmalen;
 
-               dd = *d1;
+               *d = *_d;
 
                if (!(d->dma.status & TX_DMA_STATUS_DU))
                        break;
 
+               dmalen = le16_to_cpu(d->dma.length);
+               trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
+                                     d->dma.error);
                wil_dbg_txrx(wil,
                             "Tx[%3d] : %d bytes, status 0x%02x err 0x%02x\n",
-                            vring->swtail, d->dma.length, d->dma.status,
+                            vring->swtail, dmalen, d->dma.status,
                             d->dma.error);
                wil_hex_dump_txrx("TxC ", DUMP_PREFIX_NONE, 32, 4,
                                  (const void *)d, sizeof(*d), false);
 
-               pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32);
+               pa = wil_desc_addr(&d->dma.addr);
                skb = vring->ctx[vring->swtail];
                if (skb) {
                        if (d->dma.error == 0) {
@@ -815,18 +849,21 @@ void wil_tx_complete(struct wil6210_priv *wil, int ringid)
                                ndev->stats.tx_errors++;
                        }
 
-                       dma_unmap_single(dev, pa, d->dma.length, DMA_TO_DEVICE);
+                       dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
                        dev_kfree_skb_any(skb);
                        vring->ctx[vring->swtail] = NULL;
                } else {
-                       dma_unmap_page(dev, pa, d->dma.length, DMA_TO_DEVICE);
+                       dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
                }
-               d->dma.addr_low = 0;
-               d->dma.addr_high = 0;
+               d->dma.addr.addr_low = 0;
+               d->dma.addr.addr_high = 0;
                d->dma.length = 0;
                d->dma.status = TX_DMA_STATUS_DU;
                vring->swtail = wil_vring_next_tail(vring);
+               done++;
        }
        if (wil_vring_avail_tx(vring) > vring->size/4)
                netif_tx_wake_all_queues(wil_to_ndev(wil));
+
+       return done;
 }
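
Most of the txrx.c churn above is one pattern applied everywhere: instead of reading and writing fields of the volatile descriptor in ring memory one by one, the code copies the slot into a local shadow (struct vring_tx_desc dd, *d = &dd; *d = *_d;), works on the copy, and publishes it back with a single structure assignment. A compact userspace sketch of that idea follows; the descriptor layout is illustrative, not the real vring_tx_desc.

/* Build: cc -o shadow_sketch shadow_sketch.c */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct demo_desc {
	uint32_t d0;
	uint64_t addr;
	uint16_t length;
	uint8_t  status;
};

/* Pretend this array is device-visible ring memory. */
static volatile struct demo_desc ring[8];

static void fill_slot(unsigned int i, uint64_t pa, uint16_t len)
{
	struct demo_desc dd, *d = &dd;            /* local shadow copy   */
	volatile struct demo_desc *_d = &ring[i]; /* slot in ring memory */

	memset(d, 0, sizeof(*d));
	d->addr = pa;
	d->length = len;
	d->status = 0;                            /* "hardware owned"    */

	*_d = *d;                                 /* publish in one go   */
}

int main(void)
{
	fill_slot(0, 0x1000, 1500);
	printf("slot 0: len %u\n", (unsigned)ring[0].length);
	return 0;
}
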
index adef12fb2aee3087c6211e3adc50ec63a6afeac8..859aea68a1faa72169f3578a43bd4b1abb607dce 100644 (file)
 #define WIL6210_RTAP_SIZE (128)
 
 /* Tx/Rx path */
+
+/*
+ * Common representation of physical address in Vring
+ */
+struct vring_dma_addr {
+       __le32 addr_low;
+       __le16 addr_high;
+} __packed;
+
+static inline dma_addr_t wil_desc_addr(struct vring_dma_addr *addr)
+{
+       return le32_to_cpu(addr->addr_low) |
+                          ((u64)le16_to_cpu(addr->addr_high) << 32);
+}
+
+static inline void wil_desc_addr_set(struct vring_dma_addr *addr,
+                                    dma_addr_t pa)
+{
+       addr->addr_low = cpu_to_le32(lower_32_bits(pa));
+       addr->addr_high = cpu_to_le16((u16)upper_32_bits(pa));
+}
+
 /*
  * Tx descriptor - MAC part
  * [dword 0]
@@ -179,6 +201,10 @@ struct vring_tx_mac {
 #define DMA_CFG_DESC_TX_0_CMD_EOP_LEN 1
 #define DMA_CFG_DESC_TX_0_CMD_EOP_MSK 0x100
 
+#define DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS 9
+#define DMA_CFG_DESC_TX_0_CMD_MARK_WB_LEN 1
+#define DMA_CFG_DESC_TX_0_CMD_MARK_WB_MSK 0x200
+
 #define DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS 10
 #define DMA_CFG_DESC_TX_0_CMD_DMA_IT_LEN 1
 #define DMA_CFG_DESC_TX_0_CMD_DMA_IT_MSK 0x400
@@ -216,13 +242,12 @@ struct vring_tx_mac {
 
 struct vring_tx_dma {
        u32 d0;
-       u32 addr_low;
-       u16 addr_high;
+       struct vring_dma_addr addr;
        u8  ip_length;
        u8  b11;       /* 0..6: mac_length; 7:ip_version */
        u8  error;     /* 0..2: err; 3..7: reserved; */
        u8  status;    /* 0: used; 1..7; reserved */
-       u16 length;
+       __le16 length;
 } __packed;
 
 /*
@@ -315,13 +340,12 @@ struct vring_rx_mac {
 
 struct vring_rx_dma {
        u32 d0;
-       u32 addr_low;
-       u16 addr_high;
+       struct vring_dma_addr addr;
        u8  ip_length;
        u8  b11;
        u8  error;
        u8  status;
-       u16 length;
+       __le16 length;
 } __packed;
 
 struct vring_tx_desc {
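
The struct vring_dma_addr change above replaces the bare addr_low/addr_high pair with a shared 48-bit address representation plus the wil_desc_addr()/wil_desc_addr_set() helpers, and makes the length field explicitly little endian. Below is a standalone, endianness-ignoring sketch of the 48-bit split; the kernel version additionally applies cpu_to_le32()/le16_to_cpu() conversions, and the struct here is illustrative only.

/* Build: cc -o addr_sketch addr_sketch.c */
#include <stdint.h>
#include <assert.h>
#include <stdio.h>

struct demo_dma_addr {
	uint32_t addr_low;      /* bits  0..31 */
	uint16_t addr_high;     /* bits 32..47 */
};

static void demo_addr_set(struct demo_dma_addr *a, uint64_t pa)
{
	a->addr_low  = (uint32_t)pa;
	a->addr_high = (uint16_t)(pa >> 32);
}

static uint64_t demo_addr_get(const struct demo_dma_addr *a)
{
	return (uint64_t)a->addr_low | ((uint64_t)a->addr_high << 32);
}

int main(void)
{
	struct demo_dma_addr a;
	uint64_t pa = 0x0000123456789abcULL;    /* any 48-bit address */

	demo_addr_set(&a, pa);
	assert(demo_addr_get(&a) == pa);
	printf("low 0x%08x high 0x%04x -> 0x%012llx\n",
	       (unsigned)a.addr_low, (unsigned)a.addr_high,
	       (unsigned long long)demo_addr_get(&a));
	return 0;
}
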
index 8f76ecd8a7e5e5ec0862b45053c254af66df9a62..44fdab51de7e52b9c9565c318c2b6eac2df3527e 100644 (file)
@@ -34,9 +34,11 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
 
 #define WIL6210_MEM_SIZE (2*1024*1024UL)
 
-#define WIL6210_RX_RING_SIZE (128)
-#define WIL6210_TX_RING_SIZE (128)
-#define WIL6210_MAX_TX_RINGS (24)
+#define WIL6210_RX_RING_SIZE   (128)
+#define WIL6210_TX_RING_SIZE   (128)
+#define WIL6210_MAX_TX_RINGS   (24) /* HW limit */
+#define WIL6210_MAX_CID                (8) /* HW limit */
+#define WIL6210_NAPI_BUDGET    (16) /* arbitrary */
 
 /* Hardware definitions begin */
 
@@ -184,6 +186,7 @@ struct vring {
 
 enum { /* for wil6210_priv.status */
        wil_status_fwready = 0,
+       wil_status_fwconnecting,
        wil_status_fwconnected,
        wil_status_dontscan,
        wil_status_reset_done,
@@ -239,6 +242,8 @@ struct wil6210_priv {
         * - consumed in thread by wmi_event_worker
         */
        spinlock_t wmi_ev_lock;
+       struct napi_struct napi_rx;
+       struct napi_struct napi_tx;
        /* DMA related */
        struct vring vring_rx;
        struct vring vring_tx[WIL6210_MAX_TX_RINGS];
@@ -267,9 +272,13 @@ struct wil6210_priv {
 #define wil_to_ndev(i) (wil_to_wdev(i)->netdev)
 #define ndev_to_wil(n) (wdev_to_wil(n->ieee80211_ptr))
 
-#define wil_dbg(wil, fmt, arg...) netdev_dbg(wil_to_ndev(wil), fmt, ##arg)
-#define wil_info(wil, fmt, arg...) netdev_info(wil_to_ndev(wil), fmt, ##arg)
-#define wil_err(wil, fmt, arg...) netdev_err(wil_to_ndev(wil), fmt, ##arg)
+int wil_dbg_trace(struct wil6210_priv *wil, const char *fmt, ...);
+int wil_err(struct wil6210_priv *wil, const char *fmt, ...);
+int wil_info(struct wil6210_priv *wil, const char *fmt, ...);
+#define wil_dbg(wil, fmt, arg...) do { \
+       netdev_dbg(wil_to_ndev(wil), fmt, ##arg); \
+       wil_dbg_trace(wil, fmt, ##arg); \
+} while (0)
 
 #define wil_dbg_irq(wil, fmt, arg...) wil_dbg(wil, "DBG[ IRQ]" fmt, ##arg)
 #define wil_dbg_txrx(wil, fmt, arg...) wil_dbg(wil, "DBG[TXRX]" fmt, ##arg)
@@ -320,7 +329,6 @@ int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid);
 int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid);
 int wmi_set_channel(struct wil6210_priv *wil, int channel);
 int wmi_get_channel(struct wil6210_priv *wil, int *channel);
-int wmi_tx_eapol(struct wil6210_priv *wil, struct sk_buff *skb);
 int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
                       const void *mac_addr);
 int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
@@ -356,10 +364,12 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
 void wil_vring_fini_tx(struct wil6210_priv *wil, int id);
 
 netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev);
-void wil_tx_complete(struct wil6210_priv *wil, int ringid);
+int wil_tx_complete(struct wil6210_priv *wil, int ringid);
+void wil6210_unmask_irq_tx(struct wil6210_priv *wil);
 
 /* RX API */
-void wil_rx_handle(struct wil6210_priv *wil);
+void wil_rx_handle(struct wil6210_priv *wil, int *quota);
+void wil6210_unmask_irq_rx(struct wil6210_priv *wil);
 
 int wil_iftype_nl2wmi(enum nl80211_iftype type);
 
index 45b04e383f9a1745e272ccfc8a8dd0abf79cfef9..dc8059ad4bab0d6d979d298d22d3bbf913211e85 100644 (file)
@@ -20,6 +20,7 @@
 #include "wil6210.h"
 #include "txrx.h"
 #include "wmi.h"
+#include "trace.h"
 
 /**
  * WMI event receiving - theory of operations
@@ -74,10 +75,11 @@ static const struct {
        {0x800000, 0x808000, 0x900000}, /* FW data RAM 32k */
        {0x840000, 0x860000, 0x908000}, /* peripheral data RAM 128k/96k used */
        {0x880000, 0x88a000, 0x880000}, /* various RGF */
-       {0x8c0000, 0x932000, 0x8c0000}, /* trivial mapping for upper area */
+       {0x8c0000, 0x949000, 0x8c0000}, /* trivial mapping for upper area */
        /*
         * 920000..930000 ucode code RAM
         * 930000..932000 ucode data RAM
+        * 932000..949000 back-door debug data
         */
 };
 
@@ -246,6 +248,8 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
        iowrite32(r->head = next_head, wil->csr + HOST_MBOX +
                  offsetof(struct wil6210_mbox_ctl, tx.head));
 
+       trace_wil6210_wmi_cmd(cmdid, buf, len);
+
        /* interrupt to FW */
        iowrite32(SW_INT_MBOX, wil->csr + HOST_SW_INT);
 
@@ -311,8 +315,8 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
 
        wil_dbg_wmi(wil, "MGMT: channel %d MCS %d SNR %d\n",
                    data->info.channel, data->info.mcs, data->info.snr);
-       wil_dbg_wmi(wil, "status 0x%04x len %d stype %04x\n", d_status, d_len,
-                   le16_to_cpu(data->info.stype));
+       wil_dbg_wmi(wil, "status 0x%04x len %d fc 0x%04x\n", d_status, d_len,
+                   le16_to_cpu(fc));
        wil_dbg_wmi(wil, "qid %d mid %d cid %d\n",
                    data->info.qid, data->info.mid, data->info.cid);
 
@@ -406,7 +410,7 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
 
        if ((wdev->iftype == NL80211_IFTYPE_STATION) ||
            (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) {
-               if (wdev->sme_state != CFG80211_SME_CONNECTING) {
+               if (!test_bit(wil_status_fwconnecting, &wil->status)) {
                        wil_err(wil, "Not in connecting state\n");
                        return;
                }
@@ -430,6 +434,7 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
 
                cfg80211_new_sta(ndev, evt->bssid, &sinfo, GFP_KERNEL);
        }
+       clear_bit(wil_status_fwconnecting, &wil->status);
        set_bit(wil_status_fwconnected, &wil->status);
 
        /* FIXME FW can transmit only ucast frames to peer */
@@ -635,8 +640,9 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
                            hdr.flags);
                if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
                    (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
-                       wil_dbg_wmi(wil, "WMI event 0x%04x\n",
-                                   evt->event.wmi.id);
+                       u16 id = le16_to_cpu(evt->event.wmi.id);
+                       wil_dbg_wmi(wil, "WMI event 0x%04x\n", id);
+                       trace_wil6210_wmi_event(id, &evt->event.wmi, len);
                }
                wil_hex_dump_wmi("evt ", DUMP_PREFIX_OFFSET, 16, 1,
                                 &evt->event.hdr, sizeof(hdr) + len, true);
@@ -724,7 +730,7 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan)
                .bcon_interval = cpu_to_le16(bi),
                .network_type = wmi_nettype,
                .disable_sec_offload = 1,
-               .channel = chan,
+               .channel = chan - 1,
        };
        struct {
                struct wil6210_mbox_hdr_wmi wmi;
@@ -734,8 +740,12 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan)
        if (!wil->secure_pcp)
                cmd.disable_sec = 1;
 
+       /*
+        * Processing time may be long; for a secure AP it takes the FW
+        * about 3500 ms to start the AP
+        */
        rc = wmi_call(wil, WMI_PCP_START_CMDID, &cmd, sizeof(cmd),
-                     WMI_PCP_STARTED_EVENTID, &reply, sizeof(reply), 100);
+                     WMI_PCP_STARTED_EVENTID, &reply, sizeof(reply), 5000);
        if (rc)
                return rc;
 
@@ -829,40 +839,6 @@ int wmi_p2p_cfg(struct wil6210_priv *wil, int channel)
        return wmi_send(wil, WMI_P2P_CFG_CMDID, &cmd, sizeof(cmd));
 }
 
-int wmi_tx_eapol(struct wil6210_priv *wil, struct sk_buff *skb)
-{
-       struct wmi_eapol_tx_cmd *cmd;
-       struct ethhdr *eth;
-       u16 eapol_len = skb->len - ETH_HLEN;
-       void *eapol = skb->data + ETH_HLEN;
-       uint i;
-       int rc;
-
-       skb_set_mac_header(skb, 0);
-       eth = eth_hdr(skb);
-       wil_dbg_wmi(wil, "EAPOL %d bytes to %pM\n", eapol_len, eth->h_dest);
-       for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
-               if (memcmp(wil->dst_addr[i], eth->h_dest, ETH_ALEN) == 0)
-                       goto found_dest;
-       }
-
-       return -EINVAL;
-
- found_dest:
-       /* find out eapol data & len */
-       cmd = kzalloc(sizeof(*cmd) + eapol_len, GFP_KERNEL);
-       if (!cmd)
-               return -EINVAL;
-
-       memcpy(cmd->dst_mac, eth->h_dest, ETH_ALEN);
-       cmd->eapol_len = cpu_to_le16(eapol_len);
-       memcpy(cmd->eapol, eapol, eapol_len);
-       rc = wmi_send(wil, WMI_EAPOL_TX_CMDID, cmd, sizeof(*cmd) + eapol_len);
-       kfree(cmd);
-
-       return rc;
-}
-
 int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
                       const void *mac_addr)
 {
index 078e6f3477a9dfb3aa9a1fd67c7cb0fd4c8fa8cb..3f21e0ba39badb430dee5db447d37d17dc332a11 100644 (file)
@@ -28,7 +28,7 @@ config B43
 
 config B43_BCMA
        bool "Support for BCMA bus"
-       depends on B43 && BCMA
+       depends on B43 && (BCMA = y || BCMA = B43)
        default y
 
 config B43_BCMA_EXTRA
@@ -39,7 +39,7 @@ config B43_BCMA_EXTRA
 
 config B43_SSB
        bool
-       depends on B43 && SSB
+       depends on B43 && (SSB = y || SSB = B43)
        default y
 
 # Auto-select SSB PCI-HOST support, if possible
@@ -111,6 +111,7 @@ config B43_PIO
 config B43_PHY_N
        bool "Support for 802.11n (N-PHY) devices"
        depends on B43
+       default y
        ---help---
          Support for the N-PHY.
 
@@ -132,6 +133,7 @@ config B43_PHY_LP
 config B43_PHY_HT
        bool "Support for HT-PHY (high throughput) devices"
        depends on B43 && B43_BCMA
+       default y
        ---help---
          Support for the HT-PHY.
 
index 4891e3df20585171da28f775586b07f3da1bb5ff..3f8e69c291460b6ac101b2d847d071bcd359e533 100644 (file)
 #include <linux/pci_ids.h>
 #include <linux/sched.h>
 #include <linux/completion.h>
+#include <linux/scatterlist.h>
 #include <linux/mmc/sdio.h>
 #include <linux/mmc/sdio_func.h>
 #include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
 #include <linux/platform_data/brcmfmac-sdio.h>
 
 #include <defs.h>
@@ -303,6 +305,153 @@ void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
                *ret = retval;
 }
 
+/**
+ * brcmf_sdio_buffrw - SDIO interface function for block data access
+ * @sdiodev: brcmfmac sdio device
+ * @fn: SDIO function number
+ * @write: direction flag
+ * @addr: dongle memory address as source/destination
+ * @pkt: skb pointer
+ *
+ * This function serves as the interface to the MMC stack for block data
+ * access. It assumes that the skbs passed down by the caller have already
+ * been padded and aligned.
+ */
+static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
+                            bool write, u32 addr, struct sk_buff_head *pktlist)
+{
+       unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
+       unsigned int max_blks, max_req_sz;
+       unsigned short max_seg_sz, seg_sz;
+       unsigned char *pkt_data;
+       struct sk_buff *pkt_next = NULL;
+       struct mmc_request mmc_req;
+       struct mmc_command mmc_cmd;
+       struct mmc_data mmc_dat;
+       struct sg_table st;
+       struct scatterlist *sgl;
+       struct mmc_host *host;
+       int ret = 0;
+
+       if (!pktlist->qlen)
+               return -EINVAL;
+
+       brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
+       if (brcmf_pm_resume_error(sdiodev))
+               return -EIO;
+
+       /* A single skb uses the standard mmc interface */
+       if (pktlist->qlen == 1) {
+               pkt_next = pktlist->next;
+               req_sz = pkt_next->len + 3;
+               req_sz &= (uint)~3;
+
+               if (write)
+                       return sdio_memcpy_toio(sdiodev->func[fn], addr,
+                                               ((u8 *)(pkt_next->data)),
+                                               req_sz);
+               else if (fn == 1)
+                       return sdio_memcpy_fromio(sdiodev->func[fn],
+                                                 ((u8 *)(pkt_next->data)),
+                                                 addr, req_sz);
+               else
+                       /* function 2 read is FIFO operation */
+                       return sdio_readsb(sdiodev->func[fn],
+                                          ((u8 *)(pkt_next->data)), addr,
+                                          req_sz);
+       }
+
+       host = sdiodev->func[fn]->card->host;
+       func_blk_sz = sdiodev->func[fn]->cur_blksize;
+       /* Blocks per command are limited by the host's max block count,
+        * the host transfer size and the IO_RW_EXTENDED maximum of 511 blocks.
+        */
+       max_blks = min_t(unsigned int, host->max_blk_count, 511u);
+       max_req_sz = min_t(unsigned int, host->max_req_size,
+                          max_blks * func_blk_sz);
+       max_seg_sz = min_t(unsigned short, host->max_segs, SG_MAX_SINGLE_ALLOC);
+       max_seg_sz = min_t(unsigned short, max_seg_sz, pktlist->qlen);
+       seg_sz = pktlist->qlen;
+       pkt_offset = 0;
+       pkt_next = pktlist->next;
+
+       if (sg_alloc_table(&st, max_seg_sz, GFP_KERNEL))
+               return -ENOMEM;
+
+       while (seg_sz) {
+               req_sz = 0;
+               sg_cnt = 0;
+               memset(&mmc_req, 0, sizeof(struct mmc_request));
+               memset(&mmc_cmd, 0, sizeof(struct mmc_command));
+               memset(&mmc_dat, 0, sizeof(struct mmc_data));
+               sgl = st.sgl;
+               /* prep sg table */
+               while (pkt_next != (struct sk_buff *)pktlist) {
+                       pkt_data = pkt_next->data + pkt_offset;
+                       sg_data_sz = pkt_next->len - pkt_offset;
+                       if (sg_data_sz > host->max_seg_size)
+                               sg_data_sz = host->max_seg_size;
+                       if (sg_data_sz > max_req_sz - req_sz)
+                               sg_data_sz = max_req_sz - req_sz;
+
+                       sg_set_buf(sgl, pkt_data, sg_data_sz);
+
+                       sg_cnt++;
+                       sgl = sg_next(sgl);
+                       req_sz += sg_data_sz;
+                       pkt_offset += sg_data_sz;
+                       if (pkt_offset == pkt_next->len) {
+                               pkt_offset = 0;
+                               pkt_next = pkt_next->next;
+                       }
+
+                       if (req_sz >= max_req_sz || sg_cnt >= max_seg_sz)
+                               break;
+               }
+               seg_sz -= sg_cnt;
+
+               if (req_sz % func_blk_sz != 0) {
+                       brcmf_err("sg request length %u is not %u aligned\n",
+                                 req_sz, func_blk_sz);
+                       sg_free_table(&st);
+                       return -ENOTBLK;
+               }
+               mmc_dat.sg = st.sgl;
+               mmc_dat.sg_len = sg_cnt;
+               mmc_dat.blksz = func_blk_sz;
+               mmc_dat.blocks = req_sz / func_blk_sz;
+               mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
+               mmc_cmd.opcode = SD_IO_RW_EXTENDED;
+               mmc_cmd.arg = write ? 1<<31 : 0;        /* write flag  */
+               mmc_cmd.arg |= (fn & 0x7) << 28;        /* SDIO func num */
+               mmc_cmd.arg |= 1<<27;                   /* block mode */
+               /* incrementing addr for function 1 */
+               mmc_cmd.arg |= (fn == 1) ? 1<<26 : 0;
+               mmc_cmd.arg |= (addr & 0x1FFFF) << 9;   /* address */
+               mmc_cmd.arg |= mmc_dat.blocks & 0x1FF;  /* block count */
+               mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
+               mmc_req.cmd = &mmc_cmd;
+               mmc_req.data = &mmc_dat;
+               if (fn == 1)
+                       addr += req_sz;
+
+               mmc_set_data_timeout(&mmc_dat, sdiodev->func[fn]->card);
+               mmc_wait_for_req(host, &mmc_req);
+
+               ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
+               if (ret != 0) {
+                       brcmf_err("CMD53 sg block %s failed %d\n",
+                                 write ? "write" : "read", ret);
+                       ret = -EIO;
+                       break;
+               }
+       }
+
+       sg_free_table(&st);
+
+       return ret;
+}
+
 static int brcmf_sdcard_recv_prepare(struct brcmf_sdio_dev *sdiodev, uint fn,
                                     uint flags, uint width, u32 *addr)
 {
@@ -355,9 +504,9 @@ int
 brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
                      uint flags, struct sk_buff *pkt)
 {
-       uint incr_fix;
        uint width;
        int err = 0;
+       struct sk_buff_head pkt_list;
 
        brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
                  fn, addr, pkt->len);
@@ -367,9 +516,10 @@ brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
        if (err)
                goto done;
 
-       incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
-       err = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_READ,
-                                        fn, addr, pkt);
+       skb_queue_head_init(&pkt_list);
+       skb_queue_tail(&pkt_list, pkt);
+       err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, &pkt_list);
+       skb_dequeue_tail(&pkt_list);
 
 done:
        return err;
@@ -391,8 +541,7 @@ int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
                goto done;
 
        incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
-       err = brcmf_sdioh_request_chain(sdiodev, incr_fix, SDIOH_READ, fn, addr,
-                                       pktq);
+       err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, pktq);
 
 done:
        return err;
@@ -424,10 +573,10 @@ int
 brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
                      uint flags, struct sk_buff *pkt)
 {
-       uint incr_fix;
        uint width;
        uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
        int err = 0;
+       struct sk_buff_head pkt_list;
 
        brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
                  fn, addr, pkt->len);
@@ -446,13 +595,14 @@ brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
 
        addr &= SBSDIO_SB_OFT_ADDR_MASK;
 
-       incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
        width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
        if (width == 4)
                addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
 
-       err = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_WRITE, fn,
-                                        addr, pkt);
+       skb_queue_head_init(&pkt_list);
+       skb_queue_tail(&pkt_list, pkt);
+       err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, &pkt_list);
+       skb_dequeue_tail(&pkt_list);
 
 done:
        return err;
@@ -466,6 +616,7 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
        struct sk_buff *pkt;
        u32 sdaddr;
        uint dsize;
+       struct sk_buff_head pkt_list;
 
        dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
        pkt = dev_alloc_skb(dsize);
@@ -474,6 +625,7 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
                return -EIO;
        }
        pkt->priority = 0;
+       skb_queue_head_init(&pkt_list);
 
        /* Determine initial transfer parameters */
        sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
@@ -501,9 +653,10 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
                skb_put(pkt, dsize);
                if (write)
                        memcpy(pkt->data, data, dsize);
-               bcmerror = brcmf_sdioh_request_buffer(sdiodev, SDIOH_DATA_INC,
-                                                     write, SDIO_FUNC_1,
-                                                     sdaddr, pkt);
+               skb_queue_tail(&pkt_list, pkt);
+               bcmerror = brcmf_sdio_buffrw(sdiodev, SDIO_FUNC_1, write,
+                                            sdaddr, &pkt_list);
+               skb_dequeue_tail(&pkt_list);
                if (bcmerror) {
                        brcmf_err("membytes transfer failed\n");
                        break;
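
The brcmf_sdio_buffrw() path above batches queued skbs into a scatter-gather CMD53 (IO_RW_EXTENDED) request and assembles the command argument word by hand from the write flag, function number, block mode, address-increment bit, 17-bit address and 9-bit block count. The standalone sketch below reproduces only that bit packing, with illustrative names; it is not a driver interface.

/* Build: cc -o cmd53_sketch cmd53_sketch.c */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

static uint32_t cmd53_arg(bool write, unsigned int fn, bool incr_addr,
			  uint32_t addr, uint32_t blocks)
{
	uint32_t arg = 0;

	arg |= write ? 1u << 31 : 0;        /* R/W flag                */
	arg |= (fn & 0x7) << 28;            /* SDIO function number    */
	arg |= 1u << 27;                    /* block (not byte) mode   */
	arg |= incr_addr ? 1u << 26 : 0;    /* incrementing address    */
	arg |= (addr & 0x1FFFF) << 9;       /* 17-bit register address */
	arg |= blocks & 0x1FF;              /* 9-bit block count       */

	return arg;
}

int main(void)
{
	/* e.g. write 8 blocks to function 2 at address 0x8000, fixed address */
	printf("arg = 0x%08x\n", cmd53_arg(true, 2, false, 0x8000, 8));
	return 0;
}
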
index 44fa0cdbf97b0772dc2651c98b5004d5040287ed..289e386f01f66facd7f7e62f4f618f55d8afeaf6 100644 (file)
@@ -66,7 +66,7 @@ MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
 static struct brcmfmac_sdio_platform_data *brcmfmac_sdio_pdata;
 
 
-static bool
+bool
 brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev)
 {
        bool is_err = false;
@@ -76,7 +76,7 @@ brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev)
        return is_err;
 }
 
-static void
+void
 brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev, wait_queue_head_t *wq)
 {
 #ifdef CONFIG_PM_SLEEP
@@ -211,115 +211,6 @@ int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
        return err_ret;
 }
 
-/* precondition: host controller is claimed */
-static int
-brcmf_sdioh_request_data(struct brcmf_sdio_dev *sdiodev, uint write, bool fifo,
-                        uint func, uint addr, struct sk_buff *pkt, uint pktlen)
-{
-       int err_ret = 0;
-
-       if ((write) && (!fifo)) {
-               err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
-                                          ((u8 *) (pkt->data)), pktlen);
-       } else if (write) {
-               err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
-                                          ((u8 *) (pkt->data)), pktlen);
-       } else if (fifo) {
-               err_ret = sdio_readsb(sdiodev->func[func],
-                                     ((u8 *) (pkt->data)), addr, pktlen);
-       } else {
-               err_ret = sdio_memcpy_fromio(sdiodev->func[func],
-                                            ((u8 *) (pkt->data)),
-                                            addr, pktlen);
-       }
-
-       return err_ret;
-}
-
-/*
- * This function takes a queue of packets. The packets on the queue
- * are assumed to be properly aligned by the caller.
- */
-int
-brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
-                         uint write, uint func, uint addr,
-                         struct sk_buff_head *pktq)
-{
-       bool fifo = (fix_inc == SDIOH_DATA_FIX);
-       u32 SGCount = 0;
-       int err_ret = 0;
-
-       struct sk_buff *pkt;
-
-       brcmf_dbg(SDIO, "Enter\n");
-
-       brcmf_pm_resume_wait(sdiodev, &sdiodev->request_chain_wait);
-       if (brcmf_pm_resume_error(sdiodev))
-               return -EIO;
-
-       skb_queue_walk(pktq, pkt) {
-               uint pkt_len = pkt->len;
-               pkt_len += 3;
-               pkt_len &= 0xFFFFFFFC;
-
-               err_ret = brcmf_sdioh_request_data(sdiodev, write, fifo, func,
-                                                  addr, pkt, pkt_len);
-               if (err_ret) {
-                       brcmf_err("%s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
-                                 write ? "TX" : "RX", pkt, SGCount, addr,
-                                 pkt_len, err_ret);
-               } else {
-                       brcmf_dbg(SDIO, "%s xfr'd %p[%d], addr=0x%05x, len=%d\n",
-                                 write ? "TX" : "RX", pkt, SGCount, addr,
-                                 pkt_len);
-               }
-               if (!fifo)
-                       addr += pkt_len;
-
-               SGCount++;
-       }
-
-       brcmf_dbg(SDIO, "Exit\n");
-       return err_ret;
-}
-
-/*
- * This function takes a single DMA-able packet.
- */
-int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
-                              uint fix_inc, uint write, uint func, uint addr,
-                              struct sk_buff *pkt)
-{
-       int status;
-       uint pkt_len;
-       bool fifo = (fix_inc == SDIOH_DATA_FIX);
-
-       brcmf_dbg(SDIO, "Enter\n");
-
-       if (pkt == NULL)
-               return -EINVAL;
-       pkt_len = pkt->len;
-
-       brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
-       if (brcmf_pm_resume_error(sdiodev))
-               return -EIO;
-
-       pkt_len += 3;
-       pkt_len &= (uint)~3;
-
-       status = brcmf_sdioh_request_data(sdiodev, write, fifo, func,
-                                          addr, pkt, pkt_len);
-       if (status) {
-               brcmf_err("%s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
-                         write ? "TX" : "RX", pkt, addr, pkt_len, status);
-       } else {
-               brcmf_dbg(SDIO, "%s xfr'd %p, addr=0x%05x, len=%d\n",
-                         write ? "TX" : "RX", pkt, addr, pkt_len);
-       }
-
-       return status;
-}
-
 static int brcmf_sdioh_get_cisaddr(struct brcmf_sdio_dev *sdiodev, u32 regaddr)
 {
        /* read 24 bits and return valid 17 bit addr */
@@ -468,7 +359,6 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
        atomic_set(&sdiodev->suspend, false);
        init_waitqueue_head(&sdiodev->request_byte_wait);
        init_waitqueue_head(&sdiodev->request_word_wait);
-       init_waitqueue_head(&sdiodev->request_chain_wait);
        init_waitqueue_head(&sdiodev->request_buffer_wait);
 
        brcmf_dbg(SDIO, "F2 found, calling brcmf_sdio_probe...\n");
@@ -606,7 +496,8 @@ static int brcmf_sdio_pd_remove(struct platform_device *pdev)
 static struct platform_driver brcmf_sdio_pd = {
        .remove         = brcmf_sdio_pd_remove,
        .driver         = {
-               .name   = BRCMFMAC_SDIO_PDATA_NAME
+               .name   = BRCMFMAC_SDIO_PDATA_NAME,
+               .owner  = THIS_MODULE,
        }
 };
 
index 28db9cf3967261e71899c0e0952e66679c4d9c40..86cbfe2c7c6cbbf404827a6cfff726b12499cf3f 100644 (file)
@@ -583,6 +583,7 @@ enum brcmf_netif_stop_reason {
  * @bssidx: index of bss associated with this interface.
  * @mac_addr: assigned mac address.
  * @netif_stop: bitmap indicates reason why netif queues are stopped.
+ * @netif_stop_lock: spinlock protecting netif_stop updates from multiple sources.
  * @pend_8021x_cnt: tracks outstanding number of 802.1x frames.
  * @pend_8021x_wait: used for signalling change in count.
  */
@@ -598,6 +599,7 @@ struct brcmf_if {
        s32 bssidx;
        u8 mac_addr[ETH_ALEN];
        u8 netif_stop;
+       spinlock_t netif_stop_lock;
        atomic_t pend_8021x_cnt;
        wait_queue_head_t pend_8021x_wait;
 };
index 59c77aa3b9597894385a9b8c4c8d72aa5f6db84f..dd85401063cb1e484fe47f423b91d4bd3f9f39af 100644 (file)
@@ -30,6 +30,7 @@
 #include "dhd_bus.h"
 #include "fwsignal.h"
 #include "dhd_dbg.h"
+#include "tracepoint.h"
 
 struct brcmf_proto_cdc_dcmd {
        __le32 cmd;     /* dongle command value */
@@ -292,6 +293,7 @@ void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx, u8 offset,
        h->flags2 = 0;
        h->data_offset = offset;
        BDC_SET_IF_IDX(h, ifidx);
+       trace_brcmf_bdchdr(pktbuf->data);
 }
 
 int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
@@ -309,6 +311,7 @@ int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
                return -EBADE;
        }
 
+       trace_brcmf_bdchdr(pktbuf->data);
        h = (struct brcmf_proto_bdc_header *)(pktbuf->data);
 
        *ifidx = BDC_GET_IF_IDX(h);
index 202869cd0932619a54c6f6914ead97b24b8907bf..c37b9d68e458880c36019851a704ee3cbef5d337 100644 (file)
@@ -156,8 +156,11 @@ ssize_t brcmf_debugfs_fws_stats_read(struct file *f, char __user *data,
                        "txs_suppr_core:    %u\n"
                        "txs_suppr_ps:      %u\n"
                        "txs_tossed:        %u\n"
+                       "txs_host_tossed:   %u\n"
+                       "bus_flow_block:    %u\n"
+                       "fws_flow_block:    %u\n"
                        "send_pkts:         BK:%u BE:%u VO:%u VI:%u BCMC:%u\n"
-                       "fifo_credits_sent: BK:%u BE:%u VO:%u VI:%u BCMC:%u\n",
+                       "requested_sent:    BK:%u BE:%u VO:%u VI:%u BCMC:%u\n",
                        fwstats->header_pulls,
                        fwstats->header_only_pkt,
                        fwstats->tlv_parse_failed,
@@ -176,14 +179,17 @@ ssize_t brcmf_debugfs_fws_stats_read(struct file *f, char __user *data,
                        fwstats->txs_supp_core,
                        fwstats->txs_supp_ps,
                        fwstats->txs_tossed,
+                       fwstats->txs_host_tossed,
+                       fwstats->bus_flow_block,
+                       fwstats->fws_flow_block,
                        fwstats->send_pkts[0], fwstats->send_pkts[1],
                        fwstats->send_pkts[2], fwstats->send_pkts[3],
                        fwstats->send_pkts[4],
-                       fwstats->fifo_credits_sent[0],
-                       fwstats->fifo_credits_sent[1],
-                       fwstats->fifo_credits_sent[2],
-                       fwstats->fifo_credits_sent[3],
-                       fwstats->fifo_credits_sent[4]);
+                       fwstats->requested_sent[0],
+                       fwstats->requested_sent[1],
+                       fwstats->requested_sent[2],
+                       fwstats->requested_sent[3],
+                       fwstats->requested_sent[4]);
 
        return simple_read_from_buffer(data, count, ppos, buf, res);
 }
index 009c87bfd9ae45e292af73082e6eccba3e1e3b1e..0af1f5dc583a8fd7dd9a550b6e3e5f585ab64925 100644 (file)
@@ -141,8 +141,7 @@ struct brcmf_fws_stats {
        u32 header_pulls;
        u32 pkt2bus;
        u32 send_pkts[5];
-       u32 fifo_credits_sent[5];
-       u32 fifo_credits_back[6];
+       u32 requested_sent[5];
        u32 generic_error;
        u32 mac_update_failed;
        u32 mac_ps_update_failed;
@@ -158,6 +157,9 @@ struct brcmf_fws_stats {
        u32 txs_supp_core;
        u32 txs_supp_ps;
        u32 txs_tossed;
+       u32 txs_host_tossed;
+       u32 bus_flow_block;
+       u32 fws_flow_block;
 };
 
 struct brcmf_pub;
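The new txs_host_tossed, bus_flow_block and fws_flow_block counters only reach the debugfs output if both the format string and the argument list in brcmf_debugfs_fws_stats_read() are extended together, as the hunk above does. A minimal stand-alone sketch of that counter-dump pattern in plain C (struct and values invented for illustration, not the driver's):

#include <stdio.h>

struct fws_stats {
        unsigned int txs_tossed;
        unsigned int txs_host_tossed;
        unsigned int bus_flow_block;
        unsigned int fws_flow_block;
};

/* Every new counter needs both a format line and a matching argument. */
static int dump_stats(char *buf, size_t len, const struct fws_stats *s)
{
        return snprintf(buf, len,
                        "txs_tossed:        %u\n"
                        "txs_host_tossed:   %u\n"
                        "bus_flow_block:    %u\n"
                        "fws_flow_block:    %u\n",
                        s->txs_tossed, s->txs_host_tossed,
                        s->bus_flow_block, s->fws_flow_block);
}

int main(void)
{
        struct fws_stats s = { 1, 2, 3, 4 };
        char buf[256];

        dump_stats(buf, sizeof(buf), &s);
        fputs(buf, stdout);
        return 0;
}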
index 2c593570497ceaa6a4f430764bddbe1b6bd4f651..8c402e7b97eb8b861459e69ea8c2f287f46948c5 100644 (file)
@@ -179,7 +179,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
        struct brcmf_pub *drvr = ifp->drvr;
        struct ethhdr *eh;
 
-       brcmf_dbg(TRACE, "Enter, idx=%d\n", ifp->bssidx);
+       brcmf_dbg(DATA, "Enter, idx=%d\n", ifp->bssidx);
 
        /* Can the device send data? */
        if (drvr->bus_if->state != BRCMF_BUS_DATA) {
@@ -240,11 +240,15 @@ done:
 void brcmf_txflowblock_if(struct brcmf_if *ifp,
                          enum brcmf_netif_stop_reason reason, bool state)
 {
+       unsigned long flags;
+
        if (!ifp)
                return;
 
        brcmf_dbg(TRACE, "enter: idx=%d stop=0x%X reason=%d state=%d\n",
                  ifp->bssidx, ifp->netif_stop, reason, state);
+
+       spin_lock_irqsave(&ifp->netif_stop_lock, flags);
        if (state) {
                if (!ifp->netif_stop)
                        netif_stop_queue(ifp->ndev);
@@ -254,6 +258,7 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp,
                if (!ifp->netif_stop)
                        netif_wake_queue(ifp->ndev);
        }
+       spin_unlock_irqrestore(&ifp->netif_stop_lock, flags);
 }
 
 void brcmf_txflowblock(struct device *dev, bool state)
@@ -264,9 +269,14 @@ void brcmf_txflowblock(struct device *dev, bool state)
 
        brcmf_dbg(TRACE, "Enter\n");
 
-       for (i = 0; i < BRCMF_MAX_IFS; i++)
-               brcmf_txflowblock_if(drvr->iflist[i],
-                                    BRCMF_NETIF_STOP_REASON_BLOCK_BUS, state);
+       if (brcmf_fws_fc_active(drvr->fws)) {
+               brcmf_fws_bus_blocked(drvr, state);
+       } else {
+               for (i = 0; i < BRCMF_MAX_IFS; i++)
+                       brcmf_txflowblock_if(drvr->iflist[i],
+                                            BRCMF_NETIF_STOP_REASON_BLOCK_BUS,
+                                            state);
+       }
 }
 
 void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
@@ -280,7 +290,7 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
        u8 ifidx;
        int ret;
 
-       brcmf_dbg(TRACE, "Enter\n");
+       brcmf_dbg(DATA, "Enter\n");
 
        skb_queue_walk_safe(skb_list, skb, pnext) {
                skb_unlink(skb, skb_list);
@@ -630,7 +640,7 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
        /* set appropriate operations */
        ndev->netdev_ops = &brcmf_netdev_ops_pri;
 
-       ndev->hard_header_len = ETH_HLEN + drvr->hdrlen;
+       ndev->hard_header_len += drvr->hdrlen;
        ndev->ethtool_ops = &brcmf_ethtool_ops;
 
        drvr->rxsz = ndev->mtu + ndev->hard_header_len +
@@ -779,6 +789,7 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
        ifp->bssidx = bssidx;
 
        init_waitqueue_head(&ifp->pend_8021x_wait);
+       spin_lock_init(&ifp->netif_stop_lock);
 
        if (mac_addr != NULL)
                memcpy(ifp->mac_addr, mac_addr, ETH_ALEN);
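With flow control now driven both by the bus (BRCMF_NETIF_STOP_REASON_BLOCK_BUS) and by firmware signalling (BRCMF_NETIF_STOP_REASON_FWS_FC), the netif_stop bitmap is updated from more than one context, so the read-modify-write in brcmf_txflowblock_if() is guarded by the new netif_stop_lock. A rough user-space analogue of that pattern, using a pthread mutex where the driver uses spin_lock_irqsave (names and output are illustrative only):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define STOP_REASON_BLOCK_BUS  0x01
#define STOP_REASON_FWS_FC     0x02

static pthread_mutex_t stop_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int netif_stop;          /* bitmap of stop reasons */

/* Set or clear one stop reason; the queue stays stopped while any reason is set. */
static void flowblock(unsigned int reason, bool state)
{
        pthread_mutex_lock(&stop_lock);
        if (state) {
                if (!netif_stop)
                        puts("stop queue");
                netif_stop |= reason;
        } else {
                netif_stop &= ~reason;
                if (!netif_stop)
                        puts("wake queue");
        }
        pthread_mutex_unlock(&stop_lock);
}

int main(void)
{
        flowblock(STOP_REASON_BLOCK_BUS, true);
        flowblock(STOP_REASON_FWS_FC, true);
        flowblock(STOP_REASON_BLOCK_BUS, false);  /* still stopped: FWS_FC set */
        flowblock(STOP_REASON_FWS_FC, false);     /* now wakes the queue */
        return 0;
}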
index d2487518bd2a6a553a635f5c9d0a1e6996f66594..264111968320e19950880bc910bc90818169577d 100644 (file)
@@ -448,8 +448,6 @@ struct brcmf_sdio {
        uint rxblen;            /* Allocated length of rxbuf */
        u8 *rxctl;              /* Aligned pointer into rxbuf */
        u8 *rxctl_orig;         /* pointer for freeing rxctl */
-       u8 *databuf;            /* Buffer for receiving big glom packet */
-       u8 *dataptr;            /* Aligned pointer into databuf */
        uint rxlen;             /* Length of valid data in buffer */
        spinlock_t rxctl_lock;  /* protection lock for ctrl frame resources */
 
@@ -473,8 +471,6 @@ struct brcmf_sdio {
        s32 idletime;           /* Control for activity timeout */
        s32 idlecount;  /* Activity timeout counter */
        s32 idleclock;  /* How to set bus driver when idle */
-       s32 sd_rxchain;
-       bool use_rxchain;       /* If brcmf should use PKT chains */
        bool rxflow_mode;       /* Rx flow control mode */
        bool rxflow;            /* Is rx flow control on */
        bool alp_only;          /* Don't use HT clock (ALP only) */
@@ -495,8 +491,7 @@ struct brcmf_sdio {
 
        struct workqueue_struct *brcmf_wq;
        struct work_struct datawork;
-       struct list_head dpc_tsklst;
-       spinlock_t dpc_tl_lock;
+       atomic_t dpc_tskcnt;
 
        const struct firmware *firmware;
        u32 fw_ptr;
@@ -1026,29 +1021,6 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
                bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
 }
 
-/* copy a buffer into a pkt buffer chain */
-static uint brcmf_sdbrcm_glom_from_buf(struct brcmf_sdio *bus, uint len)
-{
-       uint n, ret = 0;
-       struct sk_buff *p;
-       u8 *buf;
-
-       buf = bus->dataptr;
-
-       /* copy the data */
-       skb_queue_walk(&bus->glom, p) {
-               n = min_t(uint, p->len, len);
-               memcpy(p->data, buf, n);
-               buf += n;
-               len -= n;
-               ret += n;
-               if (!len)
-                       break;
-       }
-
-       return ret;
-}
-
 /* return total length of buffer chain */
 static uint brcmf_sdbrcm_glom_len(struct brcmf_sdio *bus)
 {
@@ -1202,8 +1174,6 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
        int errcode;
        u8 doff, sfdoff;
 
-       bool usechain = bus->use_rxchain;
-
        struct brcmf_sdio_read rd_new;
 
        /* If packets, issue read(s) and send up packet chain */
@@ -1238,7 +1208,6 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
                        if (sublen % BRCMF_SDALIGN) {
                                brcmf_err("sublen %d not multiple of %d\n",
                                          sublen, BRCMF_SDALIGN);
-                               usechain = false;
                        }
                        totlen += sublen;
 
@@ -1305,27 +1274,9 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
                 * packet and copy into the chain.
                 */
                sdio_claim_host(bus->sdiodev->func[1]);
-               if (usechain) {
-                       errcode = brcmf_sdcard_recv_chain(bus->sdiodev,
-                                       bus->sdiodev->sbwad,
-                                       SDIO_FUNC_2, F2SYNC, &bus->glom);
-               } else if (bus->dataptr) {
-                       errcode = brcmf_sdcard_recv_buf(bus->sdiodev,
-                                       bus->sdiodev->sbwad,
-                                       SDIO_FUNC_2, F2SYNC,
-                                       bus->dataptr, dlen);
-                       sublen = (u16) brcmf_sdbrcm_glom_from_buf(bus, dlen);
-                       if (sublen != dlen) {
-                               brcmf_err("FAILED TO COPY, dlen %d sublen %d\n",
-                                         dlen, sublen);
-                               errcode = -1;
-                       }
-                       pnext = NULL;
-               } else {
-                       brcmf_err("COULDN'T ALLOC %d-BYTE GLOM, FORCE FAILURE\n",
-                                 dlen);
-                       errcode = -1;
-               }
+               errcode = brcmf_sdcard_recv_chain(bus->sdiodev,
+                               bus->sdiodev->sbwad,
+                               SDIO_FUNC_2, F2SYNC, &bus->glom);
                sdio_release_host(bus->sdiodev->func[1]);
                bus->sdcnt.f2rxdata++;
 
@@ -2061,23 +2012,6 @@ static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus)
        }
 }
 
-static inline void brcmf_sdbrcm_adddpctsk(struct brcmf_sdio *bus)
-{
-       struct list_head *new_hd;
-       unsigned long flags;
-
-       if (in_interrupt())
-               new_hd = kzalloc(sizeof(struct list_head), GFP_ATOMIC);
-       else
-               new_hd = kzalloc(sizeof(struct list_head), GFP_KERNEL);
-       if (new_hd == NULL)
-               return;
-
-       spin_lock_irqsave(&bus->dpc_tl_lock, flags);
-       list_add_tail(new_hd, &bus->dpc_tsklst);
-       spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
-}
-
 static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
 {
        u8 idx;
@@ -2312,7 +2246,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
                   (!atomic_read(&bus->fcstate) &&
                    brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
                    data_ok(bus)) || PKT_AVAILABLE()) {
-               brcmf_sdbrcm_adddpctsk(bus);
+               atomic_inc(&bus->dpc_tskcnt);
        }
 
        /* If we're done for now, turn off clock request. */
@@ -2342,7 +2276,6 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
        struct brcmf_sdio *bus = sdiodev->bus;
-       unsigned long flags;
 
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -2369,26 +2302,21 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
        } else {
                ret = 0;
        }
-       spin_unlock_bh(&bus->txqlock);
 
        if (pktq_len(&bus->txq) >= TXHI) {
                bus->txoff = true;
                brcmf_txflowblock(bus->sdiodev->dev, true);
        }
+       spin_unlock_bh(&bus->txqlock);
 
 #ifdef DEBUG
        if (pktq_plen(&bus->txq, prec) > qcount[prec])
                qcount[prec] = pktq_plen(&bus->txq, prec);
 #endif
 
-       spin_lock_irqsave(&bus->dpc_tl_lock, flags);
-       if (list_empty(&bus->dpc_tsklst)) {
-               spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
-
-               brcmf_sdbrcm_adddpctsk(bus);
+       if (atomic_read(&bus->dpc_tskcnt) == 0) {
+               atomic_inc(&bus->dpc_tskcnt);
                queue_work(bus->brcmf_wq, &bus->datawork);
-       } else {
-               spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
        }
 
        return ret;
@@ -2525,7 +2453,6 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
        struct brcmf_sdio *bus = sdiodev->bus;
-       unsigned long flags;
 
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -2612,18 +2539,13 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
                } while (ret < 0 && retries++ < TXRETRIES);
        }
 
-       spin_lock_irqsave(&bus->dpc_tl_lock, flags);
        if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) &&
-           list_empty(&bus->dpc_tsklst)) {
-               spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
-
+           atomic_read(&bus->dpc_tskcnt) == 0) {
                bus->activity = false;
                sdio_claim_host(bus->sdiodev->func[1]);
                brcmf_dbg(INFO, "idle\n");
                brcmf_sdbrcm_clkctl(bus, CLK_NONE, true);
                sdio_release_host(bus->sdiodev->func[1]);
-       } else {
-               spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
        }
 
        if (ret)
@@ -3451,7 +3373,7 @@ void brcmf_sdbrcm_isr(void *arg)
        if (!bus->intr)
                brcmf_err("isr w/o interrupt configured!\n");
 
-       brcmf_sdbrcm_adddpctsk(bus);
+       atomic_inc(&bus->dpc_tskcnt);
        queue_work(bus->brcmf_wq, &bus->datawork);
 }
 
@@ -3460,7 +3382,6 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
 #ifdef DEBUG
        struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev);
 #endif /* DEBUG */
-       unsigned long flags;
 
        brcmf_dbg(TIMER, "Enter\n");
 
@@ -3476,11 +3397,9 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
                if (!bus->intr ||
                    (bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) {
 
-                       spin_lock_irqsave(&bus->dpc_tl_lock, flags);
-                       if (list_empty(&bus->dpc_tsklst)) {
+                       if (atomic_read(&bus->dpc_tskcnt) == 0) {
                                u8 devpend;
-                               spin_unlock_irqrestore(&bus->dpc_tl_lock,
-                                                      flags);
+
                                sdio_claim_host(bus->sdiodev->func[1]);
                                devpend = brcmf_sdio_regrb(bus->sdiodev,
                                                           SDIO_CCCR_INTx,
@@ -3489,9 +3408,6 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
                                intstatus =
                                    devpend & (INTR_STATUS_FUNC1 |
                                               INTR_STATUS_FUNC2);
-                       } else {
-                               spin_unlock_irqrestore(&bus->dpc_tl_lock,
-                                                      flags);
                        }
 
                        /* If there is something, make like the ISR and
@@ -3500,7 +3416,7 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
                                bus->sdcnt.pollcnt++;
                                atomic_set(&bus->ipend, 1);
 
-                               brcmf_sdbrcm_adddpctsk(bus);
+                               atomic_inc(&bus->dpc_tskcnt);
                                queue_work(bus->brcmf_wq, &bus->datawork);
                        }
                }
@@ -3545,41 +3461,15 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
        return (atomic_read(&bus->ipend) > 0);
 }
 
-static bool brcmf_sdbrcm_chipmatch(u16 chipid)
-{
-       if (chipid == BCM43143_CHIP_ID)
-               return true;
-       if (chipid == BCM43241_CHIP_ID)
-               return true;
-       if (chipid == BCM4329_CHIP_ID)
-               return true;
-       if (chipid == BCM4330_CHIP_ID)
-               return true;
-       if (chipid == BCM4334_CHIP_ID)
-               return true;
-       if (chipid == BCM4335_CHIP_ID)
-               return true;
-       return false;
-}
-
 static void brcmf_sdio_dataworker(struct work_struct *work)
 {
        struct brcmf_sdio *bus = container_of(work, struct brcmf_sdio,
                                              datawork);
-       struct list_head *cur_hd, *tmp_hd;
-       unsigned long flags;
-
-       spin_lock_irqsave(&bus->dpc_tl_lock, flags);
-       list_for_each_safe(cur_hd, tmp_hd, &bus->dpc_tsklst) {
-               spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
 
+       while (atomic_read(&bus->dpc_tskcnt)) {
                brcmf_sdbrcm_dpc(bus);
-
-               spin_lock_irqsave(&bus->dpc_tl_lock, flags);
-               list_del(cur_hd);
-               kfree(cur_hd);
+               atomic_dec(&bus->dpc_tskcnt);
        }
-       spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
 }
 
 static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus)
@@ -3589,9 +3479,6 @@ static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus)
        kfree(bus->rxbuf);
        bus->rxctl = bus->rxbuf = NULL;
        bus->rxlen = 0;
-
-       kfree(bus->databuf);
-       bus->databuf = NULL;
 }
 
 static bool brcmf_sdbrcm_probe_malloc(struct brcmf_sdio *bus)
@@ -3604,29 +3491,10 @@ static bool brcmf_sdbrcm_probe_malloc(struct brcmf_sdio *bus)
                            ALIGNMENT) + BRCMF_SDALIGN;
                bus->rxbuf = kmalloc(bus->rxblen, GFP_ATOMIC);
                if (!(bus->rxbuf))
-                       goto fail;
-       }
-
-       /* Allocate buffer to receive glomed packet */
-       bus->databuf = kmalloc(MAX_DATA_BUF, GFP_ATOMIC);
-       if (!(bus->databuf)) {
-               /* release rxbuf which was already located as above */
-               if (!bus->rxblen)
-                       kfree(bus->rxbuf);
-               goto fail;
+                       return false;
        }
 
-       /* Align the buffer */
-       if ((unsigned long)bus->databuf % BRCMF_SDALIGN)
-               bus->dataptr = bus->databuf + (BRCMF_SDALIGN -
-                              ((unsigned long)bus->databuf % BRCMF_SDALIGN));
-       else
-               bus->dataptr = bus->databuf;
-
        return true;
-
-fail:
-       return false;
 }
 
 static bool
@@ -3667,11 +3535,6 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
                goto fail;
        }
 
-       if (!brcmf_sdbrcm_chipmatch((u16) bus->ci->chip)) {
-               brcmf_err("unsupported chip: 0x%04x\n", bus->ci->chip);
-               goto fail;
-       }
-
        if (brcmf_sdbrcm_kso_init(bus)) {
                brcmf_err("error enabling KSO\n");
                goto fail;
@@ -3770,10 +3633,6 @@ static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus)
        bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
        bus->roundup = min(max_roundup, bus->blocksize);
 
-       /* bus module does not support packet chaining */
-       bus->use_rxchain = false;
-       bus->sd_rxchain = false;
-
        /* SR state */
        bus->sleeping = false;
        bus->sr_enabled = false;
@@ -3927,8 +3786,7 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
                bus->watchdog_tsk = NULL;
        }
        /* Initialize DPC thread */
-       INIT_LIST_HEAD(&bus->dpc_tsklst);
-       spin_lock_init(&bus->dpc_tl_lock);
+       atomic_set(&bus->dpc_tskcnt, 0);
 
        /* Assign bus interface call back */
        bus->sdiodev->bus_if->dev = bus->sdiodev->dev;
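The dhd_sdio.c hunks above drop the dpc_tsklst list and its dpc_tl_lock in favour of a single atomic counter, dpc_tskcnt: the ISR, txdata and watchdog paths increment it and queue the work item, and brcmf_sdio_dataworker() runs the DPC once per pending increment. A condensed stand-alone sketch of that counting scheme with C11 atomics (no workqueue, names invented):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int dpc_tskcnt;

static void dpc(void) { puts("run dpc"); }

/* Producer side: request one more DPC pass. */
static void schedule_dpc(void) { atomic_fetch_add(&dpc_tskcnt, 1); }

/* Worker side: run the DPC once for every pending request. */
static void dataworker(void)
{
        while (atomic_load(&dpc_tskcnt)) {
                dpc();
                atomic_fetch_sub(&dpc_tskcnt, 1);
        }
}

int main(void)
{
        schedule_dpc();
        schedule_dpc();
        dataworker();   /* prints "run dpc" twice */
        return 0;
}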
index 6ec5db9c60a52196e2cd1273efda7d7652b031ba..e679214b3c98be52662e9f7f09d1e90eb0b62ec7 100644 (file)
@@ -101,7 +101,8 @@ struct brcmf_event;
        BRCMF_ENUM_DEF(P2P_PROBEREQ_MSG, 72) \
        BRCMF_ENUM_DEF(DCS_REQUEST, 73) \
        BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74) \
-       BRCMF_ENUM_DEF(ACTION_FRAME_RX, 75)
+       BRCMF_ENUM_DEF(ACTION_FRAME_RX, 75) \
+       BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127)
 
 #define BRCMF_ENUM_DEF(id, val) \
        BRCMF_E_##id = (val),
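The event table above is an X-macro: BRCMF_ENUM_DEF is redefined immediately before each expansion of the list, so one table can generate the enum values here and, for example, matching name strings elsewhere. A small self-contained illustration of the technique (the events and values below are made up, not the driver's):

#include <stdio.h>

#define EVENT_LIST(X) \
        X(SET_SSID, 0)  \
        X(JOIN, 1)      \
        X(AUTH, 3)

#define DEFINE_ENUM(id, val)  EV_##id = (val),
enum event_id { EVENT_LIST(DEFINE_ENUM) };

#define DEFINE_NAME(id, val)  [val] = #id,
static const char *event_name[] = { EVENT_LIST(DEFINE_NAME) };

int main(void)
{
        printf("%d -> %s\n", EV_AUTH, event_name[EV_AUTH]);  /* 3 -> AUTH */
        return 0;
}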
index 5352dc1fdf3ca6c03b48e4c27952d32da25eaa2f..13e75c4b1a6b291e712ebfb39ea820d4dbcb333f 100644 (file)
@@ -22,7 +22,6 @@
 #include <linux/etherdevice.h>
 #include <linux/err.h>
 #include <linux/jiffies.h>
-#include <uapi/linux/nl80211.h>
 #include <net/cfg80211.h>
 
 #include <brcmu_utils.h>
@@ -142,7 +141,7 @@ static const char *brcmf_fws_get_tlv_name(enum brcmf_fws_tlv_type id)
 #define BRCMF_FWS_FLOWCONTROL_HIWATER                  128
 #define BRCMF_FWS_FLOWCONTROL_LOWATER                  64
 
-#define BRCMF_FWS_PSQ_PREC_COUNT               ((NL80211_NUM_ACS + 1) * 2)
+#define BRCMF_FWS_PSQ_PREC_COUNT               ((BRCMF_FWS_FIFO_COUNT + 1) * 2)
 #define BRCMF_FWS_PSQ_LEN                              256
 
 #define BRCMF_FWS_HTOD_FLAG_PKTFROMHOST                        0x01
@@ -157,11 +156,13 @@ static const char *brcmf_fws_get_tlv_name(enum brcmf_fws_tlv_type id)
  * @BRCMF_FWS_SKBSTATE_NEW: sk_buff is newly arrived in the driver.
  * @BRCMF_FWS_SKBSTATE_DELAYED: sk_buff had to wait on queue.
  * @BRCMF_FWS_SKBSTATE_SUPPRESSED: sk_buff has been suppressed by firmware.
+ * @BRCMF_FWS_SKBSTATE_TIM: allocated for TIM update info.
  */
 enum brcmf_fws_skb_state {
        BRCMF_FWS_SKBSTATE_NEW,
        BRCMF_FWS_SKBSTATE_DELAYED,
-       BRCMF_FWS_SKBSTATE_SUPPRESSED
+       BRCMF_FWS_SKBSTATE_SUPPRESSED,
+       BRCMF_FWS_SKBSTATE_TIM
 };
 
 /**
@@ -193,9 +194,8 @@ struct brcmf_skbuff_cb {
  *     b[11]  - packet sent upon firmware request.
  *     b[10]  - packet only contains signalling data.
  *     b[9]   - packet is a tx packet.
- *     b[8]   - packet uses FIFO credit (non-pspoll).
+ *     b[8]   - packet used a requested credit.
  *     b[7]   - interface in AP mode.
- *     b[6:4] - AC FIFO number.
  *     b[3:0] - interface index.
  */
 #define BRCMF_SKB_IF_FLAGS_REQUESTED_MASK      0x0800
@@ -204,12 +204,10 @@ struct brcmf_skbuff_cb {
 #define BRCMF_SKB_IF_FLAGS_SIGNAL_ONLY_SHIFT   10
 #define BRCMF_SKB_IF_FLAGS_TRANSMIT_MASK        0x0200
 #define BRCMF_SKB_IF_FLAGS_TRANSMIT_SHIFT      9
-#define BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK    0x0100
-#define BRCMF_SKB_IF_FLAGS_CREDITCHECK_SHIFT   8
+#define BRCMF_SKB_IF_FLAGS_REQ_CREDIT_MASK     0x0100
+#define BRCMF_SKB_IF_FLAGS_REQ_CREDIT_SHIFT    8
 #define BRCMF_SKB_IF_FLAGS_IF_AP_MASK          0x0080
 #define BRCMF_SKB_IF_FLAGS_IF_AP_SHIFT         7
-#define BRCMF_SKB_IF_FLAGS_FIFO_MASK           0x0070
-#define BRCMF_SKB_IF_FLAGS_FIFO_SHIFT          4
 #define BRCMF_SKB_IF_FLAGS_INDEX_MASK          0x000f
 #define BRCMF_SKB_IF_FLAGS_INDEX_SHIFT         0
 
@@ -246,7 +244,7 @@ struct brcmf_skbuff_cb {
 #define BRCMF_SKB_HTOD_TAG_HSLOT_MASK                  0x00ffff00
 #define BRCMF_SKB_HTOD_TAG_HSLOT_SHIFT                 8
 #define BRCMF_SKB_HTOD_TAG_FREERUN_MASK                        0x000000ff
-#define BRCMF_SKB_HTOD_TAG_FREERUN_SHIFT                       0
+#define BRCMF_SKB_HTOD_TAG_FREERUN_SHIFT               0
 
 #define brcmf_skb_htod_tag_set_field(skb, field, value) \
        brcmu_maskset32(&(brcmf_skbcb(skb)->htod), \
@@ -278,6 +276,7 @@ struct brcmf_skbuff_cb {
 /**
  * enum brcmf_fws_fifo - fifo indices used by dongle firmware.
  *
+ * @BRCMF_FWS_FIFO_FIRST: first fifo, i.e. background.
  * @BRCMF_FWS_FIFO_AC_BK: fifo for background traffic.
  * @BRCMF_FWS_FIFO_AC_BE: fifo for best-effort traffic.
  * @BRCMF_FWS_FIFO_AC_VI: fifo for video traffic.
@@ -287,7 +286,8 @@ struct brcmf_skbuff_cb {
  * @BRCMF_FWS_FIFO_COUNT: number of fifos.
  */
 enum brcmf_fws_fifo {
-       BRCMF_FWS_FIFO_AC_BK,
+       BRCMF_FWS_FIFO_FIRST,
+       BRCMF_FWS_FIFO_AC_BK = BRCMF_FWS_FIFO_FIRST,
        BRCMF_FWS_FIFO_AC_BE,
        BRCMF_FWS_FIFO_AC_VI,
        BRCMF_FWS_FIFO_AC_VO,
@@ -307,12 +307,15 @@ enum brcmf_fws_fifo {
  *     firmware suppress the packet as device is already in PS mode.
  * @BRCMF_FWS_TXSTATUS_FW_TOSSED:
  *     firmware tossed the packet.
+ * @BRCMF_FWS_TXSTATUS_HOST_TOSSED:
+ *     host tossed the packet.
  */
 enum brcmf_fws_txstatus {
        BRCMF_FWS_TXSTATUS_DISCARD,
        BRCMF_FWS_TXSTATUS_CORE_SUPPRESS,
        BRCMF_FWS_TXSTATUS_FW_PS_SUPPRESS,
-       BRCMF_FWS_TXSTATUS_FW_TOSSED
+       BRCMF_FWS_TXSTATUS_FW_TOSSED,
+       BRCMF_FWS_TXSTATUS_HOST_TOSSED
 };
 
 enum brcmf_fws_fcmode {
@@ -343,6 +346,7 @@ enum brcmf_fws_mac_desc_state {
  * @transit_count: packet in transit to firmware.
  */
 struct brcmf_fws_mac_descriptor {
+       char name[16];
        u8 occupied;
        u8 mac_handle;
        u8 interface_id;
@@ -356,7 +360,6 @@ struct brcmf_fws_mac_descriptor {
        u8 seq[BRCMF_FWS_FIFO_COUNT];
        struct pktq psq;
        int transit_count;
-       int suppress_count;
        int suppr_transit_count;
        bool send_tim_signal;
        u8 traffic_pending_bmp;
@@ -383,12 +386,10 @@ enum brcmf_fws_hanger_item_state {
  * struct brcmf_fws_hanger_item - single entry for tx pending packet.
  *
  * @state: entry is either free or occupied.
- * @gen: generation.
  * @pkt: packet itself.
  */
 struct brcmf_fws_hanger_item {
        enum brcmf_fws_hanger_item_state state;
-       u8 gen;
        struct sk_buff *pkt;
 };
 
@@ -424,6 +425,7 @@ struct brcmf_fws_info {
        struct brcmf_fws_stats stats;
        struct brcmf_fws_hanger hanger;
        enum brcmf_fws_fcmode fcmode;
+       bool bcmc_credit_check;
        struct brcmf_fws_macdesc_table desc;
        struct workqueue_struct *fws_wq;
        struct work_struct fws_dequeue_work;
@@ -434,6 +436,8 @@ struct brcmf_fws_info {
        u32 fifo_credit_map;
        u32 fifo_delay_map;
        unsigned long borrow_defer_timestamp;
+       bool bus_flow_blocked;
+       bool creditmap_received;
 };
 
 /*
@@ -507,7 +511,6 @@ static void brcmf_fws_hanger_init(struct brcmf_fws_hanger *hanger)
 {
        int i;
 
-       brcmf_dbg(TRACE, "enter\n");
        memset(hanger, 0, sizeof(*hanger));
        for (i = 0; i < ARRAY_SIZE(hanger->items); i++)
                hanger->items[i].state = BRCMF_FWS_HANGER_ITEM_STATE_FREE;
@@ -517,7 +520,6 @@ static u32 brcmf_fws_hanger_get_free_slot(struct brcmf_fws_hanger *h)
 {
        u32 i;
 
-       brcmf_dbg(TRACE, "enter\n");
        i = (h->slot_pos + 1) % BRCMF_FWS_HANGER_MAXITEMS;
 
        while (i != h->slot_pos) {
@@ -533,14 +535,12 @@ static u32 brcmf_fws_hanger_get_free_slot(struct brcmf_fws_hanger *h)
        h->failed_slotfind++;
        i = BRCMF_FWS_HANGER_MAXITEMS;
 done:
-       brcmf_dbg(TRACE, "exit: %d\n", i);
        return i;
 }
 
 static int brcmf_fws_hanger_pushpkt(struct brcmf_fws_hanger *h,
-                                          struct sk_buff *pkt, u32 slot_id)
+                                   struct sk_buff *pkt, u32 slot_id)
 {
-       brcmf_dbg(TRACE, "enter\n");
        if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS)
                return -ENOENT;
 
@@ -560,7 +560,6 @@ static int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h,
                                          u32 slot_id, struct sk_buff **pktout,
                                          bool remove_item)
 {
-       brcmf_dbg(TRACE, "enter\n");
        if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS)
                return -ENOENT;
 
@@ -574,23 +573,18 @@ static int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h,
        if (remove_item) {
                h->items[slot_id].state = BRCMF_FWS_HANGER_ITEM_STATE_FREE;
                h->items[slot_id].pkt = NULL;
-               h->items[slot_id].gen = 0xff;
                h->popped++;
        }
        return 0;
 }
 
 static int brcmf_fws_hanger_mark_suppressed(struct brcmf_fws_hanger *h,
-                                                  u32 slot_id, u8 gen)
+                                           u32 slot_id)
 {
-       brcmf_dbg(TRACE, "enter\n");
-
        if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS)
                return -ENOENT;
 
-       h->items[slot_id].gen = gen;
-
-       if (h->items[slot_id].state != BRCMF_FWS_HANGER_ITEM_STATE_INUSE) {
+       if (h->items[slot_id].state == BRCMF_FWS_HANGER_ITEM_STATE_FREE) {
                brcmf_err("entry not in use\n");
                return -EINVAL;
        }
@@ -599,25 +593,6 @@ static int brcmf_fws_hanger_mark_suppressed(struct brcmf_fws_hanger *h,
        return 0;
 }
 
-static int brcmf_fws_hanger_get_genbit(struct brcmf_fws_hanger *hanger,
-                                             struct sk_buff *pkt, u32 slot_id,
-                                             int *gen)
-{
-       brcmf_dbg(TRACE, "enter\n");
-       *gen = 0xff;
-
-       if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS)
-               return -ENOENT;
-
-       if (hanger->items[slot_id].state == BRCMF_FWS_HANGER_ITEM_STATE_FREE) {
-               brcmf_err("slot not in use\n");
-               return -EINVAL;
-       }
-
-       *gen = hanger->items[slot_id].gen;
-       return 0;
-}
-
 static void brcmf_fws_hanger_cleanup(struct brcmf_fws_info *fws,
                                     bool (*fn)(struct sk_buff *, void *),
                                     int ifidx)
@@ -627,7 +602,6 @@ static void brcmf_fws_hanger_cleanup(struct brcmf_fws_info *fws,
        int i;
        enum brcmf_fws_hanger_item_state s;
 
-       brcmf_dbg(TRACE, "enter: ifidx=%d\n", ifidx);
        for (i = 0; i < ARRAY_SIZE(h->items); i++) {
                s = h->items[i].state;
                if (s == BRCMF_FWS_HANGER_ITEM_STATE_INUSE ||
@@ -644,14 +618,28 @@ static void brcmf_fws_hanger_cleanup(struct brcmf_fws_info *fws,
        }
 }
 
-static void brcmf_fws_init_mac_descriptor(struct brcmf_fws_mac_descriptor *desc,
-                                         u8 *addr, u8 ifidx)
+static void brcmf_fws_macdesc_set_name(struct brcmf_fws_info *fws,
+                                      struct brcmf_fws_mac_descriptor *desc)
+{
+       if (desc == &fws->desc.other)
+               strlcpy(desc->name, "MAC-OTHER", sizeof(desc->name));
+       else if (desc->mac_handle)
+               scnprintf(desc->name, sizeof(desc->name), "MAC-%d:%d",
+                         desc->mac_handle, desc->interface_id);
+       else
+               scnprintf(desc->name, sizeof(desc->name), "MACIF:%d",
+                         desc->interface_id);
+}
+
+static void brcmf_fws_macdesc_init(struct brcmf_fws_mac_descriptor *desc,
+                                  u8 *addr, u8 ifidx)
 {
        brcmf_dbg(TRACE,
                  "enter: desc %p ea=%pM, ifidx=%u\n", desc, addr, ifidx);
        desc->occupied = 1;
        desc->state = BRCMF_FWS_STATE_OPEN;
        desc->requested_credit = 0;
+       desc->requested_packet = 0;
        /* depending on use may need ifp->bssidx instead */
        desc->interface_id = ifidx;
        desc->ac_bitmap = 0xff; /* update this when handling APSD */
@@ -660,22 +648,22 @@ static void brcmf_fws_init_mac_descriptor(struct brcmf_fws_mac_descriptor *desc,
 }
 
 static
-void brcmf_fws_clear_mac_descriptor(struct brcmf_fws_mac_descriptor *desc)
+void brcmf_fws_macdesc_deinit(struct brcmf_fws_mac_descriptor *desc)
 {
        brcmf_dbg(TRACE,
                  "enter: ea=%pM, ifidx=%u\n", desc->ea, desc->interface_id);
        desc->occupied = 0;
        desc->state = BRCMF_FWS_STATE_CLOSE;
        desc->requested_credit = 0;
+       desc->requested_packet = 0;
 }
 
 static struct brcmf_fws_mac_descriptor *
-brcmf_fws_mac_descriptor_lookup(struct brcmf_fws_info *fws, u8 *ea)
+brcmf_fws_macdesc_lookup(struct brcmf_fws_info *fws, u8 *ea)
 {
        struct brcmf_fws_mac_descriptor *entry;
        int i;
 
-       brcmf_dbg(TRACE, "enter: ea=%pM\n", ea);
        if (ea == NULL)
                return ERR_PTR(-EINVAL);
 
@@ -690,42 +678,33 @@ brcmf_fws_mac_descriptor_lookup(struct brcmf_fws_info *fws, u8 *ea)
 }
 
 static struct brcmf_fws_mac_descriptor*
-brcmf_fws_find_mac_desc(struct brcmf_fws_info *fws, struct brcmf_if *ifp,
-                       u8 *da)
+brcmf_fws_macdesc_find(struct brcmf_fws_info *fws, struct brcmf_if *ifp, u8 *da)
 {
        struct brcmf_fws_mac_descriptor *entry = &fws->desc.other;
        bool multicast;
-       enum nl80211_iftype iftype;
-
-       brcmf_dbg(TRACE, "enter: idx=%d\n", ifp->bssidx);
 
        multicast = is_multicast_ether_addr(da);
-       iftype = brcmf_cfg80211_get_iftype(ifp);
 
-       /* Multicast destination and P2P clients get the interface entry.
-        * STA gets the interface entry if there is no exact match. For
-        * example, TDLS destinations have their own entry.
+       /* Multicast destination, STA and P2P clients get the interface entry.
+        * STA/GC gets the Mac Entry for TDLS destinations, TDLS destinations
+        * have their own entry.
         */
-       entry = NULL;
-       if ((multicast || iftype == NL80211_IFTYPE_STATION ||
-            iftype == NL80211_IFTYPE_P2P_CLIENT) && ifp->fws_desc)
+       if (multicast && ifp->fws_desc) {
                entry = ifp->fws_desc;
-
-       if (entry != NULL && iftype != NL80211_IFTYPE_STATION)
                goto done;
+       }
 
-       entry = brcmf_fws_mac_descriptor_lookup(fws, da);
+       entry = brcmf_fws_macdesc_lookup(fws, da);
        if (IS_ERR(entry))
-               entry = &fws->desc.other;
+               entry = ifp->fws_desc;
 
 done:
-       brcmf_dbg(TRACE, "exit: entry=%p\n", entry);
        return entry;
 }
 
-static bool brcmf_fws_mac_desc_closed(struct brcmf_fws_info *fws,
-                                     struct brcmf_fws_mac_descriptor *entry,
-                                     int fifo)
+static bool brcmf_fws_macdesc_closed(struct brcmf_fws_info *fws,
+                                    struct brcmf_fws_mac_descriptor *entry,
+                                    int fifo)
 {
        struct brcmf_fws_mac_descriptor *if_entry;
        bool closed;
@@ -748,15 +727,11 @@ static bool brcmf_fws_mac_desc_closed(struct brcmf_fws_info *fws,
        return closed || !(entry->ac_bitmap & BIT(fifo));
 }
 
-static void brcmf_fws_mac_desc_cleanup(struct brcmf_fws_info *fws,
-                                      struct brcmf_fws_mac_descriptor *entry,
-                                      int ifidx)
+static void brcmf_fws_macdesc_cleanup(struct brcmf_fws_info *fws,
+                                     struct brcmf_fws_mac_descriptor *entry,
+                                     int ifidx)
 {
-       brcmf_dbg(TRACE, "enter: entry=(ea=%pM, ifid=%d), ifidx=%d\n",
-                 entry->ea, entry->interface_id, ifidx);
        if (entry->occupied && (ifidx == -1 || ifidx == entry->interface_id)) {
-               brcmf_dbg(TRACE, "flush psq: ifidx=%d, qlen=%d\n",
-                         ifidx, entry->psq.len);
                brcmf_fws_psq_flush(fws, &entry->psq, ifidx);
                entry->occupied = !!(entry->psq.len);
        }
@@ -772,7 +747,6 @@ static void brcmf_fws_bus_txq_cleanup(struct brcmf_fws_info *fws,
        int prec;
        u32 hslot;
 
-       brcmf_dbg(TRACE, "enter: ifidx=%d\n", ifidx);
        txq = brcmf_bus_gettxq(fws->drvr->bus_if);
        if (IS_ERR(txq)) {
                brcmf_dbg(TRACE, "no txq to clean up\n");
@@ -798,7 +772,6 @@ static void brcmf_fws_cleanup(struct brcmf_fws_info *fws, int ifidx)
        struct brcmf_fws_mac_descriptor *table;
        bool (*matchfn)(struct sk_buff *, void *) = NULL;
 
-       brcmf_dbg(TRACE, "enter: ifidx=%d\n", ifidx);
        if (fws == NULL)
                return;
 
@@ -808,51 +781,122 @@ static void brcmf_fws_cleanup(struct brcmf_fws_info *fws, int ifidx)
        /* cleanup individual nodes */
        table = &fws->desc.nodes[0];
        for (i = 0; i < ARRAY_SIZE(fws->desc.nodes); i++)
-               brcmf_fws_mac_desc_cleanup(fws, &table[i], ifidx);
+               brcmf_fws_macdesc_cleanup(fws, &table[i], ifidx);
 
-       brcmf_fws_mac_desc_cleanup(fws, &fws->desc.other, ifidx);
+       brcmf_fws_macdesc_cleanup(fws, &fws->desc.other, ifidx);
        brcmf_fws_bus_txq_cleanup(fws, matchfn, ifidx);
        brcmf_fws_hanger_cleanup(fws, matchfn, ifidx);
 }
 
-static void brcmf_fws_tim_update(struct brcmf_fws_info *ctx,
-                                struct brcmf_fws_mac_descriptor *entry,
-                                int prec)
+static int brcmf_fws_hdrpush(struct brcmf_fws_info *fws, struct sk_buff *skb)
 {
-       brcmf_dbg(TRACE, "enter: ea=%pM\n", entry->ea);
-       if (entry->state == BRCMF_FWS_STATE_CLOSE) {
-               /* check delayedQ and suppressQ in one call using bitmap */
-               if (brcmu_pktq_mlen(&entry->psq, 3 << (prec * 2)) == 0)
-                       entry->traffic_pending_bmp =
-                               entry->traffic_pending_bmp & ~NBITVAL(prec);
-               else
-                       entry->traffic_pending_bmp =
-                               entry->traffic_pending_bmp | NBITVAL(prec);
+       struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
+       u8 *wlh;
+       u16 data_offset = 0;
+       u8 fillers;
+       __le32 pkttag = cpu_to_le32(brcmf_skbcb(skb)->htod);
+
+       brcmf_dbg(TRACE, "enter: ea=%pM, ifidx=%u (%u), pkttag=0x%08X, hslot=%d\n",
+                 entry->ea, entry->interface_id,
+                 brcmf_skb_if_flags_get_field(skb, INDEX),
+                 le32_to_cpu(pkttag), (le32_to_cpu(pkttag) >> 8) & 0xffff);
+       if (entry->send_tim_signal)
+               data_offset += 2 + BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN;
+
+       /* +2 is for Type[1] and Len[1] in TLV, plus TIM signal */
+       data_offset += 2 + BRCMF_FWS_TYPE_PKTTAG_LEN;
+       fillers = round_up(data_offset, 4) - data_offset;
+       data_offset += fillers;
+
+       skb_push(skb, data_offset);
+       wlh = skb->data;
+
+       wlh[0] = BRCMF_FWS_TYPE_PKTTAG;
+       wlh[1] = BRCMF_FWS_TYPE_PKTTAG_LEN;
+       memcpy(&wlh[2], &pkttag, sizeof(pkttag));
+       wlh += BRCMF_FWS_TYPE_PKTTAG_LEN + 2;
+
+       if (entry->send_tim_signal) {
+               entry->send_tim_signal = 0;
+               wlh[0] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP;
+               wlh[1] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN;
+               wlh[2] = entry->mac_handle;
+               wlh[3] = entry->traffic_pending_bmp;
+               brcmf_dbg(TRACE, "adding TIM info: %02X:%02X:%02X:%02X\n",
+                         wlh[0], wlh[1], wlh[2], wlh[3]);
+               wlh += BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN + 2;
+               entry->traffic_lastreported_bmp = entry->traffic_pending_bmp;
        }
-       /* request a TIM update to firmware at the next piggyback opportunity */
+       if (fillers)
+               memset(wlh, BRCMF_FWS_TYPE_FILLER, fillers);
+
+       brcmf_proto_hdrpush(fws->drvr, brcmf_skb_if_flags_get_field(skb, INDEX),
+                           data_offset >> 2, skb);
+       return 0;
+}
+
+static bool brcmf_fws_tim_update(struct brcmf_fws_info *fws,
+                                struct brcmf_fws_mac_descriptor *entry,
+                                int fifo, bool send_immediately)
+{
+       struct sk_buff *skb;
+       struct brcmf_bus *bus;
+       struct brcmf_skbuff_cb *skcb;
+       s32 err;
+       u32 len;
+
+       /* check delayedQ and suppressQ in one call using bitmap */
+       if (brcmu_pktq_mlen(&entry->psq, 3 << (fifo * 2)) == 0)
+               entry->traffic_pending_bmp &= ~NBITVAL(fifo);
+       else
+               entry->traffic_pending_bmp |= NBITVAL(fifo);
+
+       entry->send_tim_signal = false;
        if (entry->traffic_lastreported_bmp != entry->traffic_pending_bmp)
                entry->send_tim_signal = true;
+       if (send_immediately && entry->send_tim_signal &&
+           entry->state == BRCMF_FWS_STATE_CLOSE) {
+               /* create a dummy packet and send it. The traffic            */
+               /* bitmap info will automatically be attached to that packet */
+               len = BRCMF_FWS_TYPE_PKTTAG_LEN + 2 +
+                     BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN + 2 +
+                     4 + fws->drvr->hdrlen;
+               skb = brcmu_pkt_buf_get_skb(len);
+               if (skb == NULL)
+                       return false;
+               skb_pull(skb, len);
+               skcb = brcmf_skbcb(skb);
+               skcb->mac = entry;
+               skcb->state = BRCMF_FWS_SKBSTATE_TIM;
+               bus = fws->drvr->bus_if;
+               err = brcmf_fws_hdrpush(fws, skb);
+               if (err == 0)
+                       err = brcmf_bus_txdata(bus, skb);
+               if (err)
+                       brcmu_pkt_buf_free_skb(skb);
+               return true;
+       }
+       return false;
 }
 
 static void
 brcmf_fws_flow_control_check(struct brcmf_fws_info *fws, struct pktq *pq,
                             u8 if_id)
 {
-       struct brcmf_if *ifp = fws->drvr->iflist[if_id];
+       struct brcmf_if *ifp = fws->drvr->iflist[!if_id ? 0 : if_id + 1];
 
        if (WARN_ON(!ifp))
                return;
 
-       brcmf_dbg(TRACE,
-                 "enter: bssidx=%d, ifidx=%d\n", ifp->bssidx, ifp->ifidx);
-
        if ((ifp->netif_stop & BRCMF_NETIF_STOP_REASON_FWS_FC) &&
            pq->len <= BRCMF_FWS_FLOWCONTROL_LOWATER)
                brcmf_txflowblock_if(ifp,
                                     BRCMF_NETIF_STOP_REASON_FWS_FC, false);
        if (!(ifp->netif_stop & BRCMF_NETIF_STOP_REASON_FWS_FC) &&
-           pq->len >= BRCMF_FWS_FLOWCONTROL_HIWATER)
+           pq->len >= BRCMF_FWS_FLOWCONTROL_HIWATER) {
+               fws->stats.fws_flow_block++;
                brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_FWS_FC, true);
+       }
        return;
 }
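brcmf_fws_hdrpush() above builds the firmware-signalling header in front of the frame as a sequence of Type/Length/Value entries (the packet tag and, when needed, the pending-traffic bitmap) and pads the total to a 4-byte boundary with filler bytes. A stripped-down sketch of that TLV layout into an ordinary buffer (type codes and endianness handling here are illustrative, not the driver's constants):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TYPE_PKTTAG  0x01   /* illustrative type codes, not the driver's */
#define TYPE_FILLER  0x00

/* Append one Type/Length/Value entry, return the bytes written. */
static size_t put_tlv(uint8_t *buf, uint8_t type, const void *val, uint8_t len)
{
        buf[0] = type;
        buf[1] = len;
        memcpy(&buf[2], val, len);
        return 2u + len;
}

int main(void)
{
        uint8_t hdr[16] = { 0 };
        uint32_t pkttag = 0xa1b2c3d4;   /* the driver stores this little-endian */
        size_t used = put_tlv(hdr, TYPE_PKTTAG, &pkttag, sizeof(pkttag));
        size_t padded = (used + 3) & ~(size_t)3;   /* pad header to 4 bytes */

        memset(&hdr[used], TYPE_FILLER, padded - used);
        printf("%zu byte header, %zu bytes of TLV\n", padded, used);
        return 0;
}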
 
@@ -876,34 +920,38 @@ int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data)
 
        entry = &fws->desc.nodes[mac_handle & 0x1F];
        if (type == BRCMF_FWS_TYPE_MACDESC_DEL) {
-               brcmf_dbg(TRACE, "deleting mac %pM idx %d\n", addr, ifidx);
                if (entry->occupied) {
-                       brcmf_fws_mac_desc_cleanup(fws, entry, -1);
-                       brcmf_fws_clear_mac_descriptor(entry);
+                       brcmf_dbg(TRACE, "deleting %s mac %pM\n",
+                                 entry->name, addr);
+                       brcmf_fws_macdesc_cleanup(fws, entry, -1);
+                       brcmf_fws_macdesc_deinit(entry);
                } else
                        fws->stats.mac_update_failed++;
                return 0;
        }
 
-       brcmf_dbg(TRACE,
-                 "add mac %pM handle %u idx %d\n", addr, mac_handle, ifidx);
-       existing = brcmf_fws_mac_descriptor_lookup(fws, addr);
+       existing = brcmf_fws_macdesc_lookup(fws, addr);
        if (IS_ERR(existing)) {
                if (!entry->occupied) {
                        entry->mac_handle = mac_handle;
-                       brcmf_fws_init_mac_descriptor(entry, addr, ifidx);
+                       brcmf_fws_macdesc_init(entry, addr, ifidx);
+                       brcmf_fws_macdesc_set_name(fws, entry);
                        brcmu_pktq_init(&entry->psq, BRCMF_FWS_PSQ_PREC_COUNT,
                                        BRCMF_FWS_PSQ_LEN);
+                       brcmf_dbg(TRACE, "add %s mac %pM\n", entry->name, addr);
                } else {
                        fws->stats.mac_update_failed++;
                }
        } else {
                if (entry != existing) {
-                       brcmf_dbg(TRACE, "relocate mac\n");
+                       brcmf_dbg(TRACE, "copy mac %s\n", existing->name);
                        memcpy(entry, existing,
                               offsetof(struct brcmf_fws_mac_descriptor, psq));
                        entry->mac_handle = mac_handle;
-                       brcmf_fws_clear_mac_descriptor(existing);
+                       brcmf_fws_macdesc_deinit(existing);
+                       brcmf_fws_macdesc_set_name(fws, entry);
+                       brcmf_dbg(TRACE, "relocate %s mac %pM\n", entry->name,
+                                 addr);
                } else {
                        brcmf_dbg(TRACE, "use existing\n");
                        WARN_ON(entry->mac_handle != mac_handle);
@@ -918,7 +966,6 @@ static int brcmf_fws_macdesc_state_indicate(struct brcmf_fws_info *fws,
 {
        struct brcmf_fws_mac_descriptor *entry;
        u8 mac_handle;
-       int i;
 
        mac_handle = data[0];
        entry = &fws->desc.nodes[mac_handle & 0x1F];
@@ -926,16 +973,18 @@ static int brcmf_fws_macdesc_state_indicate(struct brcmf_fws_info *fws,
                fws->stats.mac_ps_update_failed++;
                return -ESRCH;
        }
-
-       /* a state update should wipe old credits? */
+       /* a state update should wipe old credits */
        entry->requested_credit = 0;
+       entry->requested_packet = 0;
        if (type == BRCMF_FWS_TYPE_MAC_OPEN) {
                entry->state = BRCMF_FWS_STATE_OPEN;
                return BRCMF_FWS_RET_OK_SCHEDULE;
        } else {
                entry->state = BRCMF_FWS_STATE_CLOSE;
-               for (i = BRCMF_FWS_FIFO_AC_BE; i < NL80211_NUM_ACS; i++)
-                       brcmf_fws_tim_update(fws, entry, i);
+               brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_BK, false);
+               brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_BE, false);
+               brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_VI, false);
+               brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_VO, true);
        }
        return BRCMF_FWS_RET_OK_NOSCHEDULE;
 }
@@ -949,7 +998,6 @@ static int brcmf_fws_interface_state_indicate(struct brcmf_fws_info *fws,
 
        ifidx = data[0];
 
-       brcmf_dbg(TRACE, "enter: ifidx=%d\n", ifidx);
        if (ifidx >= BRCMF_MAX_IFS) {
                ret = -ERANGE;
                goto fail;
@@ -961,6 +1009,8 @@ static int brcmf_fws_interface_state_indicate(struct brcmf_fws_info *fws,
                goto fail;
        }
 
+       brcmf_dbg(TRACE, "%s (%d): %s\n", brcmf_fws_get_tlv_name(type), type,
+                 entry->name);
        switch (type) {
        case BRCMF_FWS_TYPE_INTERFACE_OPEN:
                entry->state = BRCMF_FWS_STATE_OPEN;
@@ -991,6 +1041,9 @@ static int brcmf_fws_request_indicate(struct brcmf_fws_info *fws, u8 type,
                return -ESRCH;
        }
 
+       brcmf_dbg(TRACE, "%s (%d): %s cnt %d bmp %d\n",
+                 brcmf_fws_get_tlv_name(type), type, entry->name,
+                 data[0], data[2]);
        if (type == BRCMF_FWS_TYPE_MAC_REQUEST_CREDIT)
                entry->requested_credit = data[0];
        else
@@ -1000,6 +1053,37 @@ static int brcmf_fws_request_indicate(struct brcmf_fws_info *fws, u8 type,
        return BRCMF_FWS_RET_OK_SCHEDULE;
 }
 
+static void
+brcmf_fws_macdesc_use_req_credit(struct brcmf_fws_mac_descriptor *entry,
+                                struct sk_buff *skb)
+{
+       if (entry->requested_credit > 0) {
+               entry->requested_credit--;
+               brcmf_skb_if_flags_set_field(skb, REQUESTED, 1);
+               brcmf_skb_if_flags_set_field(skb, REQ_CREDIT, 1);
+               if (entry->state != BRCMF_FWS_STATE_CLOSE)
+                       brcmf_err("requested credit set while mac not closed!\n");
+       } else if (entry->requested_packet > 0) {
+               entry->requested_packet--;
+               brcmf_skb_if_flags_set_field(skb, REQUESTED, 1);
+               brcmf_skb_if_flags_set_field(skb, REQ_CREDIT, 0);
+               if (entry->state != BRCMF_FWS_STATE_CLOSE)
+                       brcmf_err("requested packet set while mac not closed!\n");
+       } else {
+               brcmf_skb_if_flags_set_field(skb, REQUESTED, 0);
+               brcmf_skb_if_flags_set_field(skb, REQ_CREDIT, 0);
+       }
+}
+
+static void brcmf_fws_macdesc_return_req_credit(struct sk_buff *skb)
+{
+       struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
+
+       if ((brcmf_skb_if_flags_get_field(skb, REQ_CREDIT)) &&
+           (entry->state == BRCMF_FWS_STATE_CLOSE))
+               entry->requested_credit++;
+}
+
 static void brcmf_fws_return_credits(struct brcmf_fws_info *fws,
                                     u8 fifo, u8 credits)
 {
@@ -1010,6 +1094,8 @@ static void brcmf_fws_return_credits(struct brcmf_fws_info *fws,
        if (!credits)
                return;
 
+       fws->fifo_credit_map |= 1 << fifo;
+
        if ((fifo == BRCMF_FWS_FIFO_AC_BE) &&
            (fws->credits_borrowed[0])) {
                for (lender_ac = BRCMF_FWS_FIFO_AC_VO; lender_ac >= 0;
@@ -1031,7 +1117,6 @@ static void brcmf_fws_return_credits(struct brcmf_fws_info *fws,
                }
        }
 
-       fws->fifo_credit_map |= 1 << fifo;
        fws->fifo_credit[fifo] += credits;
 }
 
@@ -1042,27 +1127,6 @@ static void brcmf_fws_schedule_deq(struct brcmf_fws_info *fws)
                queue_work(fws->fws_wq, &fws->fws_dequeue_work);
 }
 
-static void brcmf_skb_pick_up_credit(struct brcmf_fws_info *fws, int fifo,
-                                    struct sk_buff *p)
-{
-       struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(p)->mac;
-
-       if (brcmf_skbcb(p)->if_flags & BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK) {
-               if (fws->fcmode != BRCMF_FWS_FCMODE_IMPLIED_CREDIT)
-                       return;
-               brcmf_fws_return_credits(fws, fifo, 1);
-       } else {
-               /*
-                * if this packet did not count against FIFO credit, it
-                * must have taken a requested_credit from the destination
-                * entry (for pspoll etc.)
-                */
-               if (!brcmf_skb_if_flags_get_field(p, REQUESTED))
-                       entry->requested_credit++;
-       }
-       brcmf_fws_schedule_deq(fws);
-}
-
 static int brcmf_fws_enq(struct brcmf_fws_info *fws,
                         enum brcmf_fws_skb_state state, int fifo,
                         struct sk_buff *p)
@@ -1078,7 +1142,7 @@ static int brcmf_fws_enq(struct brcmf_fws_info *fws,
                return -ENOENT;
        }
 
-       brcmf_dbg(TRACE, "enter: ea=%pM, qlen=%d\n", entry->ea, entry->psq.len);
+       brcmf_dbg(DATA, "enter: fifo %d skb %p\n", fifo, p);
        if (state == BRCMF_FWS_SKBSTATE_SUPPRESSED) {
                prec += 1;
                qfull_stat = &fws->stats.supprq_full_error;
@@ -1095,14 +1159,12 @@ static int brcmf_fws_enq(struct brcmf_fws_info *fws,
 
        /* update the sk_buff state */
        brcmf_skbcb(p)->state = state;
-       if (state == BRCMF_FWS_SKBSTATE_SUPPRESSED)
-               entry->suppress_count++;
 
        /*
         * A packet has been pushed so update traffic
         * availability bitmap, if applicable
         */
-       brcmf_fws_tim_update(fws, entry, fifo);
+       brcmf_fws_tim_update(fws, entry, fifo, true);
        brcmf_fws_flow_control_check(fws, &entry->psq,
                                     brcmf_skb_if_flags_get_field(p, INDEX));
        return 0;
@@ -1113,7 +1175,6 @@ static struct sk_buff *brcmf_fws_deq(struct brcmf_fws_info *fws, int fifo)
        struct brcmf_fws_mac_descriptor *table;
        struct brcmf_fws_mac_descriptor *entry;
        struct sk_buff *p;
-       int use_credit = 1;
        int num_nodes;
        int node_pos;
        int prec_out;
@@ -1127,7 +1188,7 @@ static struct sk_buff *brcmf_fws_deq(struct brcmf_fws_info *fws, int fifo)
        for (i = 0; i < num_nodes; i++) {
                entry = &table[(node_pos + i) % num_nodes];
                if (!entry->occupied ||
-                   brcmf_fws_mac_desc_closed(fws, entry, fifo))
+                   brcmf_fws_macdesc_closed(fws, entry, fifo))
                        continue;
 
                if (entry->suppressed)
@@ -1137,9 +1198,8 @@ static struct sk_buff *brcmf_fws_deq(struct brcmf_fws_info *fws, int fifo)
                p = brcmu_pktq_mdeq(&entry->psq, pmsk << (fifo * 2), &prec_out);
                if (p == NULL) {
                        if (entry->suppressed) {
-                               if (entry->suppr_transit_count >
-                                   entry->suppress_count)
-                                       return NULL;
+                               if (entry->suppr_transit_count)
+                                       continue;
                                entry->suppressed = false;
                                p = brcmu_pktq_mdeq(&entry->psq,
                                                    1 << (fifo * 2), &prec_out);
@@ -1148,26 +1208,7 @@ static struct sk_buff *brcmf_fws_deq(struct brcmf_fws_info *fws, int fifo)
                if  (p == NULL)
                        continue;
 
-               /* did the packet come from suppress sub-queue? */
-               if (entry->requested_credit > 0) {
-                       entry->requested_credit--;
-                       /*
-                        * if the packet was pulled out while destination is in
-                        * closed state but had a non-zero packets requested,
-                        * then this should not count against the FIFO credit.
-                        * That is due to the fact that the firmware will
-                        * most likely hold onto this packet until a suitable
-                        * time later to push it to the appropriate AC FIFO.
-                        */
-                       if (entry->state == BRCMF_FWS_STATE_CLOSE)
-                               use_credit = 0;
-               } else if (entry->requested_packet > 0) {
-                       entry->requested_packet--;
-                       brcmf_skb_if_flags_set_field(p, REQUESTED, 1);
-                       if (entry->state == BRCMF_FWS_STATE_CLOSE)
-                               use_credit = 0;
-               }
-               brcmf_skb_if_flags_set_field(p, CREDITCHECK, use_credit);
+               brcmf_fws_macdesc_use_req_credit(entry, p);
 
                /* move dequeue position to ensure fair round-robin */
                fws->deq_node_pos[fifo] = (node_pos + i + 1) % num_nodes;
@@ -1179,7 +1220,7 @@ static struct sk_buff *brcmf_fws_deq(struct brcmf_fws_info *fws, int fifo)
                 * A packet has been picked up, update traffic
                 * availability bitmap, if applicable
                 */
-               brcmf_fws_tim_update(fws, entry, fifo);
+               brcmf_fws_tim_update(fws, entry, fifo, false);
 
                /*
                 * decrement total enqueued fifo packets and
@@ -1192,7 +1233,7 @@ static struct sk_buff *brcmf_fws_deq(struct brcmf_fws_info *fws, int fifo)
        }
        p = NULL;
 done:
-       brcmf_dbg(TRACE, "exit: fifo %d skb %p\n", fifo, p);
+       brcmf_dbg(DATA, "exit: fifo %d skb %p\n", fifo, p);
        return p;
 }
 
@@ -1202,22 +1243,26 @@ static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo,
        struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
        u32 hslot;
        int ret;
+       u8 ifidx;
 
        hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
 
        /* this packet was suppressed */
-       if (!entry->suppressed || entry->generation != genbit) {
+       if (!entry->suppressed) {
                entry->suppressed = true;
-               entry->suppress_count = brcmu_pktq_mlen(&entry->psq,
-                                                       1 << (fifo * 2 + 1));
                entry->suppr_transit_count = entry->transit_count;
+               brcmf_dbg(DATA, "suppress %s: transit %d\n",
+                         entry->name, entry->transit_count);
        }
 
        entry->generation = genbit;
 
-       ret = brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_SUPPRESSED, fifo, skb);
+       ret = brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb);
+       if (ret == 0)
+               ret = brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_SUPPRESSED, fifo,
+                                   skb);
        if (ret != 0) {
-               /* suppress q is full, drop this packet */
+               /* suppress q is full or hdrpull failed, drop this packet */
                brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb,
                                        true);
        } else {
@@ -1225,26 +1270,24 @@ static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo,
                 * Mark suppressed to avoid a double free during
                 * wlfc cleanup
                 */
-               brcmf_fws_hanger_mark_suppressed(&fws->hanger, hslot,
-                                                genbit);
-               entry->suppress_count++;
+               brcmf_fws_hanger_mark_suppressed(&fws->hanger, hslot);
        }
 
        return ret;
 }
 
 static int
-brcmf_fws_txstatus_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
+brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
                           u32 genbit)
 {
        u32 fifo;
        int ret;
        bool remove_from_hanger = true;
        struct sk_buff *skb;
+       struct brcmf_skbuff_cb *skcb;
        struct brcmf_fws_mac_descriptor *entry = NULL;
 
-       brcmf_dbg(TRACE, "status: flags=0x%X, hslot=%d\n",
-                 flags, hslot);
+       brcmf_dbg(DATA, "flags %d\n", flags);
 
        if (flags == BRCMF_FWS_TXSTATUS_DISCARD)
                fws->stats.txs_discard++;
@@ -1256,6 +1299,8 @@ brcmf_fws_txstatus_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
                remove_from_hanger = false;
        } else if (flags == BRCMF_FWS_TXSTATUS_FW_TOSSED)
                fws->stats.txs_tossed++;
+       else if (flags == BRCMF_FWS_TXSTATUS_HOST_TOSSED)
+               fws->stats.txs_host_tossed++;
        else
                brcmf_err("unexpected txstatus\n");
 
@@ -1266,26 +1311,35 @@ brcmf_fws_txstatus_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
                return ret;
        }
 
-       entry = brcmf_skbcb(skb)->mac;
+       skcb = brcmf_skbcb(skb);
+       entry = skcb->mac;
        if (WARN_ON(!entry)) {
                brcmu_pkt_buf_free_skb(skb);
                return -EINVAL;
        }
+       entry->transit_count--;
+       if (entry->suppressed && entry->suppr_transit_count)
+               entry->suppr_transit_count--;
+
+       brcmf_dbg(DATA, "%s flags %X htod %X\n", entry->name, skcb->if_flags,
+                 skcb->htod);
 
        /* pick up the implicit credit from this packet */
        fifo = brcmf_skb_htod_tag_get_field(skb, FIFO);
-       brcmf_skb_pick_up_credit(fws, fifo, skb);
+       if ((fws->fcmode == BRCMF_FWS_FCMODE_IMPLIED_CREDIT) ||
+           (brcmf_skb_if_flags_get_field(skb, REQ_CREDIT)) ||
+           (flags == BRCMF_FWS_TXSTATUS_HOST_TOSSED)) {
+               brcmf_fws_return_credits(fws, fifo, 1);
+               brcmf_fws_schedule_deq(fws);
+       }
+       brcmf_fws_macdesc_return_req_credit(skb);
 
        if (!remove_from_hanger)
                ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb, genbit);
 
-       if (remove_from_hanger || ret) {
-               entry->transit_count--;
-               if (entry->suppressed)
-                       entry->suppr_transit_count--;
-
+       if (remove_from_hanger || ret)
                brcmf_txfinalize(fws->drvr, skb, true);
-       }
+
        return 0;
 }
 
@@ -1299,11 +1353,11 @@ static int brcmf_fws_fifocreditback_indicate(struct brcmf_fws_info *fws,
                return BRCMF_FWS_RET_OK_NOSCHEDULE;
        }
 
-       brcmf_dbg(TRACE, "enter: data %pM\n", data);
+       brcmf_dbg(DATA, "enter: data %pM\n", data);
        for (i = 0; i < BRCMF_FWS_FIFO_COUNT; i++)
                brcmf_fws_return_credits(fws, i, data[i]);
 
-       brcmf_dbg(INFO, "map: credit %x delay %x\n", fws->fifo_credit_map,
+       brcmf_dbg(DATA, "map: credit %x delay %x\n", fws->fifo_credit_map,
                  fws->fifo_delay_map);
        return BRCMF_FWS_RET_OK_SCHEDULE;
 }
@@ -1323,7 +1377,7 @@ static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 *data)
        hslot = brcmf_txstatus_get_field(status, HSLOT);
        genbit = brcmf_txstatus_get_field(status, GENERATION);
 
-       return brcmf_fws_txstatus_process(fws, flags, hslot, genbit);
+       return brcmf_fws_txs_process(fws, flags, hslot, genbit);
 }
 
 static int brcmf_fws_dbg_seqnum_check(struct brcmf_fws_info *fws, u8 *data)
@@ -1331,7 +1385,7 @@ static int brcmf_fws_dbg_seqnum_check(struct brcmf_fws_info *fws, u8 *data)
        __le32 timestamp;
 
        memcpy(&timestamp, &data[2], sizeof(timestamp));
-       brcmf_dbg(INFO, "received: seq %d, timestamp %d\n", data[1],
+       brcmf_dbg(CTL, "received: seq %d, timestamp %d\n", data[1],
                  le32_to_cpu(timestamp));
        return 0;
 }
@@ -1364,6 +1418,10 @@ static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp,
                brcmf_err("event payload too small (%d)\n", e->datalen);
                return -EINVAL;
        }
+       if (fws->creditmap_received)
+               return 0;
+
+       fws->creditmap_received = true;
 
        brcmf_dbg(TRACE, "enter: credits %pM\n", credits);
        brcmf_fws_lock(ifp->drvr, flags);
@@ -1379,6 +1437,20 @@ static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp,
        return 0;
 }
 
+static int brcmf_fws_notify_bcmc_credit_support(struct brcmf_if *ifp,
+                                               const struct brcmf_event_msg *e,
+                                               void *data)
+{
+       struct brcmf_fws_info *fws = ifp->drvr->fws;
+       ulong flags;
+
+       brcmf_fws_lock(ifp->drvr, flags);
+       if (fws)
+               fws->bcmc_credit_check = true;
+       brcmf_fws_unlock(ifp->drvr, flags);
+       return 0;
+}
+
 int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
                      struct sk_buff *skb)
 {
@@ -1392,7 +1464,7 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
        s32 status;
        s32 err;
 
-       brcmf_dbg(TRACE, "enter: ifidx %d, skblen %u, sig %d\n",
+       brcmf_dbg(HDRS, "enter: ifidx %d, skblen %u, sig %d\n",
                  ifidx, skb->len, signal_len);
 
        WARN_ON(signal_len > skb->len);
@@ -1426,14 +1498,15 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
                len = signal_data[1];
                data = signal_data + 2;
 
-               brcmf_dbg(INFO, "tlv type=%d (%s), len=%d, data[0]=%d\n", type,
-                         brcmf_fws_get_tlv_name(type), len, *data);
+               brcmf_dbg(HDRS, "tlv type=%s (%d), len=%d (%d)\n",
+                         brcmf_fws_get_tlv_name(type), type, len,
+                         brcmf_fws_get_tlv_len(fws, type));
 
                /* abort parsing when length invalid */
                if (data_len < len + 2)
                        break;
 
-               if (len != brcmf_fws_get_tlv_len(fws, type))
+               if (len < brcmf_fws_get_tlv_len(fws, type))
                        break;
 
                err = BRCMF_FWS_RET_OK_NOSCHEDULE;
@@ -1502,64 +1575,32 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
        return 0;
 }
 
-static int brcmf_fws_hdrpush(struct brcmf_fws_info *fws, struct sk_buff *skb)
-{
-       struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
-       u8 *wlh;
-       u16 data_offset = 0;
-       u8 fillers;
-       __le32 pkttag = cpu_to_le32(brcmf_skbcb(skb)->htod);
-
-       brcmf_dbg(TRACE, "enter: ea=%pM, ifidx=%u, pkttag=0x%08X\n",
-                 entry->ea, entry->interface_id, le32_to_cpu(pkttag));
-       if (entry->send_tim_signal)
-               data_offset += 2 + BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN;
-
-       /* +2 is for Type[1] and Len[1] in TLV, plus TIM signal */
-       data_offset += 2 + BRCMF_FWS_TYPE_PKTTAG_LEN;
-       fillers = round_up(data_offset, 4) - data_offset;
-       data_offset += fillers;
-
-       skb_push(skb, data_offset);
-       wlh = skb->data;
-
-       wlh[0] = BRCMF_FWS_TYPE_PKTTAG;
-       wlh[1] = BRCMF_FWS_TYPE_PKTTAG_LEN;
-       memcpy(&wlh[2], &pkttag, sizeof(pkttag));
-       wlh += BRCMF_FWS_TYPE_PKTTAG_LEN + 2;
-
-       if (entry->send_tim_signal) {
-               entry->send_tim_signal = 0;
-               wlh[0] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP;
-               wlh[1] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN;
-               wlh[2] = entry->mac_handle;
-               wlh[3] = entry->traffic_pending_bmp;
-               wlh += BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN + 2;
-               entry->traffic_lastreported_bmp = entry->traffic_pending_bmp;
-       }
-       if (fillers)
-               memset(wlh, BRCMF_FWS_TYPE_FILLER, fillers);
-
-       brcmf_proto_hdrpush(fws->drvr, brcmf_skb_if_flags_get_field(skb, INDEX),
-                           data_offset >> 2, skb);
-       return 0;
-}
-
 static int brcmf_fws_precommit_skb(struct brcmf_fws_info *fws, int fifo,
                                   struct sk_buff *p)
 {
        struct brcmf_skbuff_cb *skcb = brcmf_skbcb(p);
        struct brcmf_fws_mac_descriptor *entry = skcb->mac;
        int rc = 0;
-       bool header_needed;
+       bool first_time;
        int hslot = BRCMF_FWS_HANGER_MAXITEMS;
        u8 free_ctr;
-       u8 ifidx;
        u8 flags;
 
-       header_needed = skcb->state != BRCMF_FWS_SKBSTATE_SUPPRESSED;
+       first_time = skcb->state != BRCMF_FWS_SKBSTATE_SUPPRESSED;
 
-       if (header_needed) {
+       brcmf_skb_if_flags_set_field(p, TRANSMIT, 1);
+       brcmf_skb_htod_tag_set_field(p, FIFO, fifo);
+       brcmf_skb_htod_tag_set_field(p, GENERATION, entry->generation);
+       flags = BRCMF_FWS_HTOD_FLAG_PKTFROMHOST;
+       if (brcmf_skb_if_flags_get_field(p, REQUESTED)) {
+               /*
+                * Indicate that this packet is being sent in response to an
+                * explicit request from the firmware side.
+                */
+               flags |= BRCMF_FWS_HTOD_FLAG_PKT_REQUESTED;
+       }
+       brcmf_skb_htod_tag_set_field(p, FLAGS, flags);
+       if (first_time) {
                /* obtaining free slot may fail, but that will be caught
                 * by the hanger push. This assures the packet has a BDC
                 * header upon return.
@@ -1568,47 +1609,20 @@ static int brcmf_fws_precommit_skb(struct brcmf_fws_info *fws, int fifo,
                free_ctr = entry->seq[fifo];
                brcmf_skb_htod_tag_set_field(p, HSLOT, hslot);
                brcmf_skb_htod_tag_set_field(p, FREERUN, free_ctr);
-               brcmf_skb_htod_tag_set_field(p, GENERATION, 1);
-               entry->transit_count++;
-       }
-       brcmf_skb_if_flags_set_field(p, TRANSMIT, 1);
-       brcmf_skb_htod_tag_set_field(p, FIFO, fifo);
-
-       flags = BRCMF_FWS_HTOD_FLAG_PKTFROMHOST;
-       if (!(skcb->if_flags & BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK)) {
-               /*
-               Indicate that this packet is being sent in response to an
-               explicit request from the firmware side.
-               */
-               flags |= BRCMF_FWS_HTOD_FLAG_PKT_REQUESTED;
-       }
-       brcmf_skb_htod_tag_set_field(p, FLAGS, flags);
-       if (header_needed) {
-               brcmf_fws_hdrpush(fws, p);
                rc = brcmf_fws_hanger_pushpkt(&fws->hanger, p, hslot);
                if (rc)
                        brcmf_err("hanger push failed: rc=%d\n", rc);
-       } else {
-               int gen;
-
-               /* remove old header */
-               rc = brcmf_proto_hdrpull(fws->drvr, false, &ifidx, p);
-               if (rc == 0) {
-                       hslot = brcmf_skb_htod_tag_get_field(p, HSLOT);
-                       brcmf_fws_hanger_get_genbit(&fws->hanger, p,
-                                                   hslot, &gen);
-                       brcmf_skb_htod_tag_set_field(p, GENERATION, gen);
-
-                       /* push new header */
-                       brcmf_fws_hdrpush(fws, p);
-               }
        }
 
+       if (rc == 0)
+               brcmf_fws_hdrpush(fws, p);
+
        return rc;
 }
 
 static void
-brcmf_fws_rollback_toq(struct brcmf_fws_info *fws, struct sk_buff *skb)
+brcmf_fws_rollback_toq(struct brcmf_fws_info *fws,
+                      struct sk_buff *skb, int fifo)
 {
        /*
        put the packet back to the head of queue
@@ -1622,13 +1636,11 @@ brcmf_fws_rollback_toq(struct brcmf_fws_info *fws, struct sk_buff *skb)
        enum brcmf_fws_skb_state state;
        struct sk_buff *pktout;
        int rc = 0;
-       int fifo;
        int hslot;
-       u8 ifidx;
 
-       fifo = brcmf_skb_if_flags_get_field(skb, FIFO);
        state = brcmf_skbcb(skb)->state;
        entry = brcmf_skbcb(skb)->mac;
+       hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
 
        if (entry != NULL) {
                if (state == BRCMF_FWS_SKBSTATE_SUPPRESSED) {
@@ -1640,19 +1652,6 @@ brcmf_fws_rollback_toq(struct brcmf_fws_info *fws, struct sk_buff *skb)
                                rc = -ENOSPC;
                        }
                } else {
-                       hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
-
-                       /* remove header first */
-                       rc = brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb);
-                       if (rc) {
-                               brcmf_err("header removal failed\n");
-                               /* free the hanger slot */
-                               brcmf_fws_hanger_poppkt(&fws->hanger, hslot,
-                                                       &pktout, true);
-                               rc = -EINVAL;
-                               goto fail;
-                       }
-
                        /* delay-q packets are going to delay-q */
                        pktout = brcmu_pktq_penq_head(&entry->psq,
                                                      2 * fifo, skb);
@@ -1668,33 +1667,30 @@ brcmf_fws_rollback_toq(struct brcmf_fws_info *fws, struct sk_buff *skb)
                        /* decrement sequence count */
                        entry->seq[fifo]--;
                }
-               /*
-               if this packet did not count against FIFO credit, it must have
-               taken a requested_credit from the firmware (for pspoll etc.)
-               */
-               if (!(brcmf_skbcb(skb)->if_flags &
-                     BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK))
-                       entry->requested_credit++;
        } else {
                brcmf_err("no mac entry linked\n");
                rc = -ENOENT;
        }
 
-
-fail:
        if (rc) {
-               brcmf_txfinalize(fws->drvr, skb, false);
                fws->stats.rollback_failed++;
-       } else
+               brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED,
+                                     hslot, 0);
+       } else {
                fws->stats.rollback_success++;
+               brcmf_fws_return_credits(fws, fifo, 1);
+               brcmf_fws_macdesc_return_req_credit(skb);
+       }
 }
 
 static int brcmf_fws_borrow_credit(struct brcmf_fws_info *fws)
 {
        int lender_ac;
 
-       if (time_after(fws->borrow_defer_timestamp, jiffies))
+       if (time_after(fws->borrow_defer_timestamp, jiffies)) {
+               fws->fifo_credit_map &= ~(1 << BRCMF_FWS_FIFO_AC_BE);
                return -ENAVAIL;
+       }
 
        for (lender_ac = 0; lender_ac <= BRCMF_FWS_FIFO_AC_VO; lender_ac++) {
                if (fws->fifo_credit[lender_ac]) {
@@ -1702,10 +1698,12 @@ static int brcmf_fws_borrow_credit(struct brcmf_fws_info *fws)
                        fws->fifo_credit[lender_ac]--;
                        if (fws->fifo_credit[lender_ac] == 0)
                                fws->fifo_credit_map &= ~(1 << lender_ac);
-                       brcmf_dbg(TRACE, "borrow credit from: %d\n", lender_ac);
+                       fws->fifo_credit_map |= (1 << BRCMF_FWS_FIFO_AC_BE);
+                       brcmf_dbg(DATA, "borrow credit from: %d\n", lender_ac);
                        return 0;
                }
        }
+       fws->fifo_credit_map &= ~(1 << BRCMF_FWS_FIFO_AC_BE);
        return -ENAVAIL;
 }
 
@@ -1714,33 +1712,6 @@ static int brcmf_fws_consume_credit(struct brcmf_fws_info *fws, int fifo,
 {
        struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
        int *credit = &fws->fifo_credit[fifo];
-       int use_credit = 1;
-
-       brcmf_dbg(TRACE, "enter: ac=%d, credits=%d\n", fifo, *credit);
-
-       if (entry->requested_credit > 0) {
-               /*
-                * if the packet was pulled out while destination is in
-                * closed state but had a non-zero packets requested,
-                * then this should not count against the FIFO credit.
-                * That is due to the fact that the firmware will
-                * most likely hold onto this packet until a suitable
-                * time later to push it to the appropriate AC FIFO.
-                */
-               entry->requested_credit--;
-               if (entry->state == BRCMF_FWS_STATE_CLOSE)
-                       use_credit = 0;
-       } else if (entry->requested_packet > 0) {
-               entry->requested_packet--;
-               brcmf_skb_if_flags_set_field(skb, REQUESTED, 1);
-               if (entry->state == BRCMF_FWS_STATE_CLOSE)
-                       use_credit = 0;
-       }
-       brcmf_skb_if_flags_set_field(skb, CREDITCHECK, use_credit);
-       if (!use_credit) {
-               brcmf_dbg(TRACE, "exit: no creditcheck set\n");
-               return 0;
-       }
 
        if (fifo != BRCMF_FWS_FIFO_AC_BE)
                fws->borrow_defer_timestamp = jiffies +
@@ -1748,17 +1719,22 @@ static int brcmf_fws_consume_credit(struct brcmf_fws_info *fws, int fifo,
 
        if (!(*credit)) {
                /* Try to borrow a credit from other queue */
-               if (fifo == BRCMF_FWS_FIFO_AC_BE &&
-                   brcmf_fws_borrow_credit(fws) == 0)
-                       return 0;
-
-               brcmf_dbg(TRACE, "exit: ac=%d, credits depleted\n", fifo);
-               return -ENAVAIL;
+               if (fifo != BRCMF_FWS_FIFO_AC_BE ||
+                   (brcmf_fws_borrow_credit(fws) != 0)) {
+                       brcmf_dbg(DATA, "ac=%d, credits depleted\n", fifo);
+                       return -ENAVAIL;
+               }
+       } else {
+               (*credit)--;
+               if (!(*credit))
+                       fws->fifo_credit_map &= ~(1 << fifo);
        }
-       (*credit)--;
-       if (!(*credit))
-               fws->fifo_credit_map &= ~(1 << fifo);
-       brcmf_dbg(TRACE, "exit: ac=%d, credits=%d\n", fifo, *credit);
+
+       brcmf_fws_macdesc_use_req_credit(entry, skb);
+
+       brcmf_dbg(DATA, "ac=%d, credits=%02d:%02d:%02d:%02d\n", fifo,
+                 fws->fifo_credit[0], fws->fifo_credit[1],
+                 fws->fifo_credit[2], fws->fifo_credit[3]);
        return 0;
 }
 
@@ -1769,6 +1745,7 @@ static int brcmf_fws_commit_skb(struct brcmf_fws_info *fws, int fifo,
        struct brcmf_fws_mac_descriptor *entry;
        struct brcmf_bus *bus = fws->drvr->bus_if;
        int rc;
+       u8 ifidx;
 
        entry = skcb->mac;
        if (IS_ERR(entry))
@@ -1780,21 +1757,27 @@ static int brcmf_fws_commit_skb(struct brcmf_fws_info *fws, int fifo,
                goto rollback;
        }
 
+       brcmf_dbg(DATA, "%s flags %X htod %X\n", entry->name, skcb->if_flags,
+                 skcb->htod);
        rc = brcmf_bus_txdata(bus, skb);
-       if (rc < 0)
+       if (rc < 0) {
+               brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb);
                goto rollback;
+       }
 
+       entry->transit_count++;
+       if (entry->suppressed)
+               entry->suppr_transit_count++;
        entry->seq[fifo]++;
        fws->stats.pkt2bus++;
-       if (brcmf_skbcb(skb)->if_flags & BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK) {
-               fws->stats.send_pkts[fifo]++;
-               fws->stats.fifo_credits_sent[fifo]++;
-       }
+       fws->stats.send_pkts[fifo]++;
+       if (brcmf_skb_if_flags_get_field(skb, REQUESTED))
+               fws->stats.requested_sent[fifo]++;
 
        return rc;
 
 rollback:
-       brcmf_fws_rollback_toq(fws, skb);
+       brcmf_fws_rollback_toq(fws, skb, fifo);
        return rc;
 }
 
@@ -1826,19 +1809,25 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
 
        /* set control buffer information */
        skcb->if_flags = 0;
-       skcb->mac = brcmf_fws_find_mac_desc(fws, ifp, eh->h_dest);
+       skcb->mac = brcmf_fws_macdesc_find(fws, ifp, eh->h_dest);
        skcb->state = BRCMF_FWS_SKBSTATE_NEW;
        brcmf_skb_if_flags_set_field(skb, INDEX, ifp->ifidx);
        if (!multicast)
                fifo = brcmf_fws_prio2fifo[skb->priority];
-       brcmf_skb_if_flags_set_field(skb, FIFO, fifo);
 
-       brcmf_dbg(TRACE, "ea=%pM, multi=%d, fifo=%d\n", eh->h_dest,
-                 multicast, fifo);
+       brcmf_dbg(DATA, "%s mac %pM multi %d fifo %d\n", skcb->mac->name,
+                 eh->h_dest, multicast, fifo);
 
        brcmf_fws_lock(drvr, flags);
+       /* multicast credit support is conditional; when the firmware
+        * indicates support, clear the multicast flag so the credit
+        * check below also applies to this frame.
+        */
+       if (fws->bcmc_credit_check)
+               multicast = false;
+
        if (skcb->mac->suppressed ||
-           brcmf_fws_mac_desc_closed(fws, skcb->mac, fifo) ||
+           fws->bus_flow_blocked ||
+           brcmf_fws_macdesc_closed(fws, skcb->mac, fifo) ||
            brcmu_pktq_mlen(&skcb->mac->psq, 3 << (fifo * 2)) ||
            (!multicast &&
             brcmf_fws_consume_credit(fws, fifo, skb) < 0)) {
@@ -1846,9 +1835,7 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
                drvr->fws->fifo_delay_map |= 1 << fifo;
                brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_DELAYED, fifo, skb);
        } else {
-               if (brcmf_fws_commit_skb(fws, fifo, skb))
-                       if (!multicast)
-                               brcmf_skb_pick_up_credit(fws, fifo, skb);
+               brcmf_fws_commit_skb(fws, fifo, skb);
        }
        brcmf_fws_unlock(drvr, flags);
        return 0;
@@ -1862,7 +1849,7 @@ void brcmf_fws_reset_interface(struct brcmf_if *ifp)
        if (!entry)
                return;
 
-       brcmf_fws_init_mac_descriptor(entry, ifp->mac_addr, ifp->ifidx);
+       brcmf_fws_macdesc_init(entry, ifp->mac_addr, ifp->ifidx);
 }
 
 void brcmf_fws_add_interface(struct brcmf_if *ifp)
@@ -1870,16 +1857,16 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp)
        struct brcmf_fws_info *fws = ifp->drvr->fws;
        struct brcmf_fws_mac_descriptor *entry;
 
-       brcmf_dbg(TRACE, "enter: idx=%d, mac=%pM\n",
-                 ifp->bssidx, ifp->mac_addr);
        if (!ifp->ndev || !ifp->drvr->fw_signals)
                return;
 
        entry = &fws->desc.iface[ifp->ifidx];
        ifp->fws_desc = entry;
-       brcmf_fws_init_mac_descriptor(entry, ifp->mac_addr, ifp->ifidx);
+       brcmf_fws_macdesc_init(entry, ifp->mac_addr, ifp->ifidx);
+       brcmf_fws_macdesc_set_name(fws, entry);
        brcmu_pktq_init(&entry->psq, BRCMF_FWS_PSQ_PREC_COUNT,
                        BRCMF_FWS_PSQ_LEN);
+       brcmf_dbg(TRACE, "added %s\n", entry->name);
 }
 
 void brcmf_fws_del_interface(struct brcmf_if *ifp)
@@ -1887,13 +1874,13 @@ void brcmf_fws_del_interface(struct brcmf_if *ifp)
        struct brcmf_fws_mac_descriptor *entry = ifp->fws_desc;
        ulong flags;
 
-       brcmf_dbg(TRACE, "enter: idx=%d\n", ifp->bssidx);
        if (!entry)
                return;
 
        brcmf_fws_lock(ifp->drvr, flags);
        ifp->fws_desc = NULL;
-       brcmf_fws_clear_mac_descriptor(entry);
+       brcmf_dbg(TRACE, "deleting %s\n", entry->name);
+       brcmf_fws_macdesc_deinit(entry);
        brcmf_fws_cleanup(ifp->drvr->fws, ifp->ifidx);
        brcmf_fws_unlock(ifp->drvr, flags);
 }
@@ -1904,39 +1891,37 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker)
        struct sk_buff *skb;
        ulong flags;
        int fifo;
-       int credit;
 
        fws = container_of(worker, struct brcmf_fws_info, fws_dequeue_work);
 
-       brcmf_dbg(TRACE, "enter: fws=%p\n", fws);
        brcmf_fws_lock(fws->drvr, flags);
-       for (fifo = NL80211_NUM_ACS; fifo >= 0; fifo--) {
-               brcmf_dbg(TRACE, "fifo %d credit %d\n", fifo,
-                         fws->fifo_credit[fifo]);
-               for (credit = 0; credit < fws->fifo_credit[fifo]; /* nop */) {
+       for (fifo = NL80211_NUM_ACS; fifo >= 0 && !fws->bus_flow_blocked;
+            fifo--) {
+               while ((fws->fifo_credit[fifo]) || ((!fws->bcmc_credit_check) &&
+                      (fifo == BRCMF_FWS_FIFO_BCMC))) {
                        skb = brcmf_fws_deq(fws, fifo);
-                       if (!skb || brcmf_fws_commit_skb(fws, fifo, skb))
+                       if (!skb)
+                               break;
+                       fws->fifo_credit[fifo]--;
+                       if (brcmf_fws_commit_skb(fws, fifo, skb))
+                               break;
+                       if (fws->bus_flow_blocked)
                                break;
-                       if (brcmf_skbcb(skb)->if_flags &
-                           BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK)
-                               credit++;
                }
                if ((fifo == BRCMF_FWS_FIFO_AC_BE) &&
-                   (credit == fws->fifo_credit[fifo])) {
-                       fws->fifo_credit[fifo] -= credit;
+                   (fws->fifo_credit[fifo] == 0) &&
+                   (!fws->bus_flow_blocked)) {
                        while (brcmf_fws_borrow_credit(fws) == 0) {
                                skb = brcmf_fws_deq(fws, fifo);
                                if (!skb) {
                                        brcmf_fws_return_credits(fws, fifo, 1);
                                        break;
                                }
-                               if (brcmf_fws_commit_skb(fws, fifo, skb)) {
-                                       brcmf_fws_return_credits(fws, fifo, 1);
+                               if (brcmf_fws_commit_skb(fws, fifo, skb))
+                                       break;
+                               if (fws->bus_flow_blocked)
                                        break;
-                               }
                        }
-               } else {
-                       fws->fifo_credit[fifo] -= credit;
                }
        }
        brcmf_fws_unlock(fws->drvr, flags);
@@ -1982,6 +1967,13 @@ int brcmf_fws_init(struct brcmf_pub *drvr)
                brcmf_err("register credit map handler failed\n");
                goto fail;
        }
+       rc = brcmf_fweh_register(drvr, BRCMF_E_BCMC_CREDIT_SUPPORT,
+                                brcmf_fws_notify_bcmc_credit_support);
+       if (rc < 0) {
+               brcmf_err("register bcmc credit handler failed\n");
+               brcmf_fweh_unregister(drvr, BRCMF_E_FIFO_CREDIT_MAP);
+               goto fail;
+       }
 
        /* setting the iovar may fail if feature is unsupported
         * so leave the rc as is so driver initialization can
@@ -1993,19 +1985,20 @@ int brcmf_fws_init(struct brcmf_pub *drvr)
        }
 
        brcmf_fws_hanger_init(&drvr->fws->hanger);
-       brcmf_fws_init_mac_descriptor(&drvr->fws->desc.other, NULL, 0);
+       brcmf_fws_macdesc_init(&drvr->fws->desc.other, NULL, 0);
+       brcmf_fws_macdesc_set_name(drvr->fws, &drvr->fws->desc.other);
        brcmu_pktq_init(&drvr->fws->desc.other.psq, BRCMF_FWS_PSQ_PREC_COUNT,
                        BRCMF_FWS_PSQ_LEN);
 
        /* create debugfs file for statistics */
        brcmf_debugfs_create_fws_stats(drvr, &drvr->fws->stats);
 
-       /* TODO: remove upon feature delivery */
-       brcmf_err("%s bdcv2 tlv signaling [%x]\n",
+       brcmf_dbg(INFO, "%s bdcv2 tlv signaling [%x]\n",
                  drvr->fw_signals ? "enabled" : "disabled", tlv);
        return 0;
 
 fail_event:
+       brcmf_fweh_unregister(drvr, BRCMF_E_BCMC_CREDIT_SUPPORT);
        brcmf_fweh_unregister(drvr, BRCMF_E_FIFO_CREDIT_MAP);
 fail:
        brcmf_fws_deinit(drvr);
@@ -2043,25 +2036,31 @@ bool brcmf_fws_fc_active(struct brcmf_fws_info *fws)
        if (!fws)
                return false;
 
-       brcmf_dbg(TRACE, "enter: mode=%d\n", fws->fcmode);
        return fws->fcmode != BRCMF_FWS_FCMODE_NONE;
 }
 
 void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb)
 {
        ulong flags;
+       u32 hslot;
 
-       brcmf_fws_lock(fws->drvr, flags);
-       brcmf_fws_txstatus_process(fws, BRCMF_FWS_TXSTATUS_FW_TOSSED,
-                                  brcmf_skb_htod_tag_get_field(skb, HSLOT), 0);
-       /* the packet never reached firmware so reclaim credit */
-       if (fws->fcmode == BRCMF_FWS_FCMODE_EXPLICIT_CREDIT &&
-           brcmf_skbcb(skb)->if_flags & BRCMF_SKB_IF_FLAGS_CREDITCHECK_MASK) {
-               brcmf_fws_return_credits(fws,
-                                        brcmf_skb_htod_tag_get_field(skb,
-                                                                     FIFO),
-                                        1);
-               brcmf_fws_schedule_deq(fws);
+       if (brcmf_skbcb(skb)->state == BRCMF_FWS_SKBSTATE_TIM) {
+               brcmu_pkt_buf_free_skb(skb);
+               return;
        }
+       brcmf_fws_lock(fws->drvr, flags);
+       hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
+       brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED, hslot, 0);
        brcmf_fws_unlock(fws->drvr, flags);
 }
+
+void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked)
+{
+       struct brcmf_fws_info *fws = drvr->fws;
+
+       fws->bus_flow_blocked = flow_blocked;
+       if (!flow_blocked)
+               brcmf_fws_schedule_deq(fws);
+       else
+               fws->stats.bus_flow_block++;
+}
index fbe483d23752ca2e2a0ae3b0ab1f6169b613809f..9fc860910bd8571abd9f42040b6918f9f7fb57e5 100644 (file)
@@ -29,5 +29,6 @@ void brcmf_fws_reset_interface(struct brcmf_if *ifp);
 void brcmf_fws_add_interface(struct brcmf_if *ifp);
 void brcmf_fws_del_interface(struct brcmf_if *ifp);
 void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb);
+void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked);
 
 #endif /* FWSIGNAL_H_ */
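
The brcmf_fws_bus_blocked() declaration added above lets a bus driver gate the firmware-signalling dequeue worker; the implementation earlier in this diff simply latches fws->bus_flow_blocked and reschedules the worker once flow is unblocked. A minimal caller sketch, purely illustrative and not part of this patch:

    /* hypothetical bus-layer helper (not in this commit) */
    static void example_bus_flow_ctrl(struct brcmf_pub *drvr, bool congested)
    {
            /* true:  the dequeue worker stops committing frames to the bus
             * false: the worker is rescheduled and resumes draining queues
             */
            brcmf_fws_bus_blocked(drvr, congested);
    }
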
index 7c1b6332747ea6e7b4f1287868a71d2232fac4ad..793df66fe0bfa887721635b19acc7064f74a22a9 100644 (file)
@@ -170,7 +170,6 @@ struct brcmf_sdio_dev {
        atomic_t suspend;               /* suspend flag */
        wait_queue_head_t request_byte_wait;
        wait_queue_head_t request_word_wait;
-       wait_queue_head_t request_chain_wait;
        wait_queue_head_t request_buffer_wait;
        struct device *dev;
        struct brcmf_bus *bus_if;
@@ -272,16 +271,6 @@ brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
                         uint rw, uint fnc, uint addr,
                         u32 *word, uint nbyte);
 
-/* read or write any buffer using cmd53 */
-extern int
-brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
-                          uint fix_inc, uint rw, uint fnc_num, u32 addr,
-                          struct sk_buff *pkt);
-extern int
-brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
-                         uint write, uint func, uint addr,
-                         struct sk_buff_head *pktq);
-
 /* Watchdog timer interface for pm ops */
 extern void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev,
                                    bool enable);
@@ -291,4 +280,8 @@ extern void brcmf_sdbrcm_disconnect(void *ptr);
 extern void brcmf_sdbrcm_isr(void *arg);
 
 extern void brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick);
+
+extern void brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev,
+                                wait_queue_head_t *wq);
+extern bool brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev);
 #endif                         /* _BRCM_SDH_H_ */
index 9df1f7a681e07e9ee32b15672e7ba9de3a795fef..bc29171128991a9836d8237c556be00482f66d00 100644 (file)
@@ -87,6 +87,27 @@ TRACE_EVENT(brcmf_hexdump,
        TP_printk("hexdump [length=%lu]", __entry->len)
 );
 
+TRACE_EVENT(brcmf_bdchdr,
+       TP_PROTO(void *data),
+       TP_ARGS(data),
+       TP_STRUCT__entry(
+               __field(u8, flags)
+               __field(u8, prio)
+               __field(u8, flags2)
+               __field(u32, siglen)
+               __dynamic_array(u8, signal, *((u8 *)data + 3) * 4)
+       ),
+       TP_fast_assign(
+               __entry->flags = *(u8 *)data;
+               __entry->prio = *((u8 *)data + 1);
+               __entry->flags2 = *((u8 *)data + 2);
+               __entry->siglen = *((u8 *)data + 3) * 4;
+               memcpy(__get_dynamic_array(signal),
+                      (u8 *)data + 4, __entry->siglen);
+       ),
+       TP_printk("bdc: prio=%d siglen=%d", __entry->prio, __entry->siglen)
+);
+
 #ifdef CONFIG_BRCM_TRACING
 
 #undef TRACE_INCLUDE_PATH
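
The brcmf_bdchdr tracepoint added above records the four BDC header bytes that precede each data frame and copies the trailing firmware-signalling TLVs. The layout it assumes, read straight from the offsets used in TP_fast_assign (struct and field names below are illustrative, not the driver's own definition):

    /* byte 0: flags
     * byte 1: 802.1D priority
     * byte 2: flags2
     * byte 3: data_offset in 4-byte words (siglen = data[3] * 4)
     * bytes 4..: firmware-signalling TLVs, siglen bytes long
     */
    struct bdc_hdr_example {
            u8 flags;
            u8 priority;
            u8 flags2;
            u8 data_offset;
    };
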
index 01aed7ad6bec17ec4f2dc453518f3a72079f5c76..322cadc51deddcbfe8d7c560dbc93669b167b574 100644 (file)
@@ -82,6 +82,7 @@ struct brcmf_usbdev_info {
        int tx_high_watermark;
        int tx_freecount;
        bool tx_flowblock;
+       spinlock_t tx_flowblock_lock;
 
        struct brcmf_usbreq *tx_reqs;
        struct brcmf_usbreq *rx_reqs;
@@ -411,6 +412,7 @@ static void brcmf_usb_tx_complete(struct urb *urb)
 {
        struct brcmf_usbreq *req = (struct brcmf_usbreq *)urb->context;
        struct brcmf_usbdev_info *devinfo = req->devinfo;
+       unsigned long flags;
 
        brcmf_dbg(USB, "Enter, urb->status=%d, skb=%p\n", urb->status,
                  req->skb);
@@ -419,11 +421,13 @@ static void brcmf_usb_tx_complete(struct urb *urb)
        brcmf_txcomplete(devinfo->dev, req->skb, urb->status == 0);
        req->skb = NULL;
        brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req, &devinfo->tx_freecount);
+       spin_lock_irqsave(&devinfo->tx_flowblock_lock, flags);
        if (devinfo->tx_freecount > devinfo->tx_high_watermark &&
                devinfo->tx_flowblock) {
                brcmf_txflowblock(devinfo->dev, false);
                devinfo->tx_flowblock = false;
        }
+       spin_unlock_irqrestore(&devinfo->tx_flowblock_lock, flags);
 }
 
 static void brcmf_usb_rx_complete(struct urb *urb)
@@ -568,6 +572,7 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
        struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
        struct brcmf_usbreq  *req;
        int ret;
+       unsigned long flags;
 
        brcmf_dbg(USB, "Enter, skb=%p\n", skb);
        if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP) {
@@ -599,11 +604,13 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
                goto fail;
        }
 
+       spin_lock_irqsave(&devinfo->tx_flowblock_lock, flags);
        if (devinfo->tx_freecount < devinfo->tx_low_watermark &&
            !devinfo->tx_flowblock) {
                brcmf_txflowblock(dev, true);
                devinfo->tx_flowblock = true;
        }
+       spin_unlock_irqrestore(&devinfo->tx_flowblock_lock, flags);
        return 0;
 
 fail:
@@ -1164,6 +1171,7 @@ struct brcmf_usbdev *brcmf_usb_attach(struct brcmf_usbdev_info *devinfo,
 
        /* Initialize the spinlocks */
        spin_lock_init(&devinfo->qlock);
+       spin_lock_init(&devinfo->tx_flowblock_lock);
 
        INIT_LIST_HEAD(&devinfo->rx_freeq);
        INIT_LIST_HEAD(&devinfo->rx_postq);
index 301e572e89233649b5af905c7e965b27222d6490..277b37ae71267a194d6ebcc541b93cc786ef9078 100644 (file)
@@ -3982,6 +3982,7 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
        struct brcmf_fil_af_params_le *af_params;
        bool ack;
        s32 chan_nr;
+       u32 freq;
 
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -3994,6 +3995,8 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
                return -EPERM;
        }
 
+       vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
+
        if (ieee80211_is_probe_resp(mgmt->frame_control)) {
                /* Right now the only reason to get a probe response */
                /* is for p2p listen response or for p2p GO from     */
@@ -4009,7 +4012,6 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
                ie_offset =  DOT11_MGMT_HDR_LEN +
                             DOT11_BCN_PRB_FIXED_LEN;
                ie_len = len - ie_offset;
-               vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
                if (vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif)
                        vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
                err = brcmf_vif_set_mgmt_ie(vif,
@@ -4033,16 +4035,22 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
                memcpy(&af_params->bssid[0], &mgmt->bssid[0], ETH_ALEN);
                /* Add the length, excluding the 802.11 header */
                action_frame->len = cpu_to_le16(len - DOT11_MGMT_HDR_LEN);
-               /* Add the channel */
-               chan_nr = ieee80211_frequency_to_channel(chan->center_freq);
+               /* Add the channel. Use the one specified as a parameter if
+                * given, otherwise the current one obtained from the firmware.
+                */
+               if (chan)
+                       freq = chan->center_freq;
+               else
+                       brcmf_fil_cmd_int_get(vif->ifp, BRCMF_C_GET_CHANNEL,
+                                             &freq);
+               chan_nr = ieee80211_frequency_to_channel(freq);
                af_params->channel = cpu_to_le32(chan_nr);
 
                memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN],
                       le16_to_cpu(action_frame->len));
 
                brcmf_dbg(TRACE, "Action frame, cookie=%lld, len=%d, freq=%d\n",
-                         *cookie, le16_to_cpu(action_frame->len),
-                         chan->center_freq);
+                         *cookie, le16_to_cpu(action_frame->len), freq);
 
                ack = brcmf_p2p_send_action_frame(cfg, cfg_to_ndev(cfg),
                                                  af_params);
index 1585cc5bf866b847e86f6210074b4c89b6c0b26f..bd982856d3853b4c6415b43c137d76b3eb4eeabf 100644 (file)
@@ -900,7 +900,7 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
                if (supr_status) {
                        update_rate = false;
                        if (supr_status == TX_STATUS_SUPR_BADCH) {
-                               brcms_err(wlc->hw->d11core,
+                               brcms_dbg_ht(wlc->hw->d11core,
                                          "%s: Pkt tx suppressed, illegal channel possibly %d\n",
                                          __func__, CHSPEC_CHANNEL(
                                          wlc->default_bss->chanspec));
diff --git a/drivers/net/wireless/cw1200/Kconfig b/drivers/net/wireless/cw1200/Kconfig
new file mode 100644 (file)
index 0000000..0880742
--- /dev/null
@@ -0,0 +1,30 @@
+config CW1200
+       tristate "CW1200 WLAN support"
+       depends on MAC80211 && CFG80211
+       help
+         This is a driver for the ST-E CW1100 & CW1200 WLAN chipsets.
+         This option just enables the driver core, see below for
+         specific bus support.
+
+if CW1200
+
+config CW1200_WLAN_SDIO
+       tristate "Support SDIO platforms"
+       depends on CW1200 && MMC
+       help
+         Enable support for the CW1200 connected via an SDIO bus.
+         By default this driver only supports the Sagrad SG901-1091/1098 EVK
+         and similar designs that utilize a hardware reset circuit. To
+         support different CW1200 SDIO designs you will need to override
+         the default platform data by calling cw1200_sdio_set_platform_data()
+         in your board setup file.
+
+config CW1200_WLAN_SPI
+       tristate "Support SPI platforms"
+       depends on CW1200 && SPI
+       help
+         Enables support for the CW1200 connected via a SPI bus.  You will
+         need to add appropriate platform data glue in your board setup
+         file.
+
+endif
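
The CW1200_WLAN_SDIO help text above refers non-EVK designs to cw1200_sdio_set_platform_data(). A minimal board-file sketch of that call; the include path, struct name and field values are assumptions based on the driver's platform-data header and are not shown in this diff:

    #include <linux/platform_data/net-cw1200.h>    /* path is an assumption */

    static struct cw1200_platform_data_sdio myboard_cw1200_pdata = {
            .ref_clk = 38400,    /* reference clock in kHz, board specific */
            /* .reset / .powerup GPIOs, .macaddr, .sdd_file, ... as required */
    };

    static int __init myboard_cw1200_init(void)
    {
            /* must run before the cw1200_wlan_sdio module probes the card */
            cw1200_sdio_set_platform_data(&myboard_cw1200_pdata);
            return 0;
    }
    arch_initcall(myboard_cw1200_init);
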
diff --git a/drivers/net/wireless/cw1200/Makefile b/drivers/net/wireless/cw1200/Makefile
new file mode 100644 (file)
index 0000000..b086aac
--- /dev/null
@@ -0,0 +1,21 @@
+cw1200_core-y := \
+               fwio.o \
+               txrx.o \
+               main.o \
+               queue.o \
+               hwio.o \
+               bh.o \
+               wsm.o \
+               sta.o \
+               scan.o \
+               debug.o
+cw1200_core-$(CONFIG_PM)       += pm.o
+
+# CFLAGS_sta.o += -DDEBUG
+
+cw1200_wlan_sdio-y := cw1200_sdio.o
+cw1200_wlan_spi-y := cw1200_spi.o
+
+obj-$(CONFIG_CW1200) += cw1200_core.o
+obj-$(CONFIG_CW1200_WLAN_SDIO) += cw1200_wlan_sdio.o
+obj-$(CONFIG_CW1200_WLAN_SPI) += cw1200_wlan_spi.o
diff --git a/drivers/net/wireless/cw1200/bh.c b/drivers/net/wireless/cw1200/bh.c
new file mode 100644 (file)
index 0000000..c1ec2a4
--- /dev/null
@@ -0,0 +1,619 @@
+/*
+ * Device handling thread implementation for mac80211 ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * Based on:
+ * ST-Ericsson UMAC CW1200 driver, which is
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Ajitpal Singh <ajitpal.singh@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <net/mac80211.h>
+#include <linux/kthread.h>
+#include <linux/timer.h>
+
+#include "cw1200.h"
+#include "bh.h"
+#include "hwio.h"
+#include "wsm.h"
+#include "hwbus.h"
+#include "debug.h"
+#include "fwio.h"
+
+static int cw1200_bh(void *arg);
+
+#define DOWNLOAD_BLOCK_SIZE_WR (0x1000 - 4)
+/* an SPI message cannot be bigger than (2^12 - 1) * 2 bytes
+ * ("*2" converts 16-bit words to bytes)
+ */
+#define MAX_SZ_RD_WR_BUFFERS   (DOWNLOAD_BLOCK_SIZE_WR*2)
+#define PIGGYBACK_CTRL_REG     (2)
+#define EFFECTIVE_BUF_SIZE     (MAX_SZ_RD_WR_BUFFERS - PIGGYBACK_CTRL_REG)
+
+/* Suspend state privates */
+enum cw1200_bh_pm_state {
+       CW1200_BH_RESUMED = 0,
+       CW1200_BH_SUSPEND,
+       CW1200_BH_SUSPENDED,
+       CW1200_BH_RESUME,
+};
+
+typedef int (*cw1200_wsm_handler)(struct cw1200_common *priv,
+       u8 *data, size_t size);
+
+static void cw1200_bh_work(struct work_struct *work)
+{
+       struct cw1200_common *priv =
+       container_of(work, struct cw1200_common, bh_work);
+       cw1200_bh(priv);
+}
+
+int cw1200_register_bh(struct cw1200_common *priv)
+{
+       int err = 0;
+       /* Realtime workqueue */
+       priv->bh_workqueue = alloc_workqueue("cw1200_bh",
+                               WQ_MEM_RECLAIM | WQ_HIGHPRI
+                               | WQ_CPU_INTENSIVE, 1);
+
+       if (!priv->bh_workqueue)
+               return -ENOMEM;
+
+       INIT_WORK(&priv->bh_work, cw1200_bh_work);
+
+       pr_debug("[BH] register.\n");
+
+       atomic_set(&priv->bh_rx, 0);
+       atomic_set(&priv->bh_tx, 0);
+       atomic_set(&priv->bh_term, 0);
+       atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED);
+       priv->bh_error = 0;
+       priv->hw_bufs_used = 0;
+       priv->buf_id_tx = 0;
+       priv->buf_id_rx = 0;
+       init_waitqueue_head(&priv->bh_wq);
+       init_waitqueue_head(&priv->bh_evt_wq);
+
+       err = !queue_work(priv->bh_workqueue, &priv->bh_work);
+       WARN_ON(err);
+       return err;
+}
+
+void cw1200_unregister_bh(struct cw1200_common *priv)
+{
+       atomic_add(1, &priv->bh_term);
+       wake_up(&priv->bh_wq);
+
+       flush_workqueue(priv->bh_workqueue);
+
+       destroy_workqueue(priv->bh_workqueue);
+       priv->bh_workqueue = NULL;
+
+       pr_debug("[BH] unregistered.\n");
+}
+
+void cw1200_irq_handler(struct cw1200_common *priv)
+{
+       pr_debug("[BH] irq.\n");
+
+       /* Disable Interrupts! */
+       /* NOTE:  hwbus_ops->lock already held */
+       __cw1200_irq_enable(priv, 0);
+
+       if (/* WARN_ON */(priv->bh_error))
+               return;
+
+       if (atomic_add_return(1, &priv->bh_rx) == 1)
+               wake_up(&priv->bh_wq);
+}
+EXPORT_SYMBOL_GPL(cw1200_irq_handler);
+
+void cw1200_bh_wakeup(struct cw1200_common *priv)
+{
+       pr_debug("[BH] wakeup.\n");
+       if (priv->bh_error) {
+               pr_err("[BH] wakeup failed (BH error)\n");
+               return;
+       }
+
+       if (atomic_add_return(1, &priv->bh_tx) == 1)
+               wake_up(&priv->bh_wq);
+}
+
+int cw1200_bh_suspend(struct cw1200_common *priv)
+{
+       pr_debug("[BH] suspend.\n");
+       if (priv->bh_error) {
+               wiphy_warn(priv->hw->wiphy, "BH error -- can't suspend\n");
+               return -EINVAL;
+       }
+
+       atomic_set(&priv->bh_suspend, CW1200_BH_SUSPEND);
+       wake_up(&priv->bh_wq);
+       return wait_event_timeout(priv->bh_evt_wq, priv->bh_error ||
+               (CW1200_BH_SUSPENDED == atomic_read(&priv->bh_suspend)),
+                1 * HZ) ? 0 : -ETIMEDOUT;
+}
+
+int cw1200_bh_resume(struct cw1200_common *priv)
+{
+       pr_debug("[BH] resume.\n");
+       if (priv->bh_error) {
+               wiphy_warn(priv->hw->wiphy, "BH error -- can't resume\n");
+               return -EINVAL;
+       }
+
+       atomic_set(&priv->bh_suspend, CW1200_BH_RESUME);
+       wake_up(&priv->bh_wq);
+       return wait_event_timeout(priv->bh_evt_wq, priv->bh_error ||
+               (CW1200_BH_RESUMED == atomic_read(&priv->bh_suspend)),
+               1 * HZ) ? 0 : -ETIMEDOUT;
+}
+
+static inline void wsm_alloc_tx_buffer(struct cw1200_common *priv)
+{
+       ++priv->hw_bufs_used;
+}
+
+int wsm_release_tx_buffer(struct cw1200_common *priv, int count)
+{
+       int ret = 0;
+       int hw_bufs_used = priv->hw_bufs_used;
+
+       priv->hw_bufs_used -= count;
+       if (WARN_ON(priv->hw_bufs_used < 0))
+               ret = -1;
+       else if (hw_bufs_used >= priv->wsm_caps.input_buffers)
+               ret = 1;
+       if (!priv->hw_bufs_used)
+               wake_up(&priv->bh_evt_wq);
+       return ret;
+}
+
+static int cw1200_bh_read_ctrl_reg(struct cw1200_common *priv,
+                                         u16 *ctrl_reg)
+{
+       int ret;
+
+       ret = cw1200_reg_read_16(priv,
+                       ST90TDS_CONTROL_REG_ID, ctrl_reg);
+       if (ret) {
+               ret = cw1200_reg_read_16(priv,
+                               ST90TDS_CONTROL_REG_ID, ctrl_reg);
+               if (ret)
+                       pr_err("[BH] Failed to read control register.\n");
+       }
+
+       return ret;
+}
+
+static int cw1200_device_wakeup(struct cw1200_common *priv)
+{
+       u16 ctrl_reg;
+       int ret;
+
+       pr_debug("[BH] Device wakeup.\n");
+
+       /* First, set the dpll register */
+       ret = cw1200_reg_write_32(priv, ST90TDS_TSET_GEN_R_W_REG_ID,
+                                 cw1200_dpll_from_clk(priv->hw_refclk));
+       if (WARN_ON(ret))
+               return ret;
+
+       /* To force the device to be always-on, the host sets WLAN_UP to 1 */
+       ret = cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID,
+                       ST90TDS_CONT_WUP_BIT);
+       if (WARN_ON(ret))
+               return ret;
+
+       ret = cw1200_bh_read_ctrl_reg(priv, &ctrl_reg);
+       if (WARN_ON(ret))
+               return ret;
+
+       /* If the device returns WLAN_RDY as 1, the device is active and will
+        * remain active.
+        */
+       if (ctrl_reg & ST90TDS_CONT_RDY_BIT) {
+               pr_debug("[BH] Device awake.\n");
+               return 1;
+       }
+
+       return 0;
+}
+
+/* Must be called from BH thread. */
+void cw1200_enable_powersave(struct cw1200_common *priv,
+                            bool enable)
+{
+       pr_debug("[BH] Powerave is %s.\n",
+                enable ? "enabled" : "disabled");
+       priv->powersave_enabled = enable;
+}
+
+static int cw1200_bh_rx_helper(struct cw1200_common *priv,
+                              uint16_t *ctrl_reg,
+                              int *tx)
+{
+       size_t read_len = 0;
+       struct sk_buff *skb_rx = NULL;
+       struct wsm_hdr *wsm;
+       size_t wsm_len;
+       u16 wsm_id;
+       u8 wsm_seq;
+       int rx_resync = 1;
+
+       size_t alloc_len;
+       u8 *data;
+
+       read_len = (*ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) * 2;
+       if (!read_len)
+               return 0; /* No more work */
+
+       if (WARN_ON((read_len < sizeof(struct wsm_hdr)) ||
+                   (read_len > EFFECTIVE_BUF_SIZE))) {
+               pr_debug("Invalid read len: %zu (%04x)",
+                        read_len, *ctrl_reg);
+               goto err;
+       }
+
+       /* Add SIZE of PIGGYBACK reg (CONTROL Reg)
+        * to the NEXT Message length + 2 Bytes for SKB
+        */
+       read_len = read_len + 2;
+
+       alloc_len = priv->hwbus_ops->align_size(
+               priv->hwbus_priv, read_len);
+
+       /* Check if not exceeding CW1200 capabilities */
+       if (WARN_ON_ONCE(alloc_len > EFFECTIVE_BUF_SIZE)) {
+               pr_debug("Read aligned len: %zu\n",
+                        alloc_len);
+       }
+
+       skb_rx = dev_alloc_skb(alloc_len);
+       if (WARN_ON(!skb_rx))
+               goto err;
+
+       skb_trim(skb_rx, 0);
+       skb_put(skb_rx, read_len);
+       data = skb_rx->data;
+       if (WARN_ON(!data))
+               goto err;
+
+       if (WARN_ON(cw1200_data_read(priv, data, alloc_len))) {
+               pr_err("rx blew up, len %zu\n", alloc_len);
+               goto err;
+       }
+
+       /* Piggyback */
+       *ctrl_reg = __le16_to_cpu(
+               ((__le16 *)data)[alloc_len / 2 - 1]);
+
+       wsm = (struct wsm_hdr *)data;
+       wsm_len = __le16_to_cpu(wsm->len);
+       if (WARN_ON(wsm_len > read_len))
+               goto err;
+
+       if (priv->wsm_enable_wsm_dumps)
+               print_hex_dump_bytes("<-- ",
+                                    DUMP_PREFIX_NONE,
+                                    data, wsm_len);
+
+       wsm_id  = __le16_to_cpu(wsm->id) & 0xFFF;
+       wsm_seq = (__le16_to_cpu(wsm->id) >> 13) & 7;
+
+       skb_trim(skb_rx, wsm_len);
+
+       if (wsm_id == 0x0800) {
+               wsm_handle_exception(priv,
+                                    &data[sizeof(*wsm)],
+                                    wsm_len - sizeof(*wsm));
+               goto err;
+       } else if (!rx_resync) {
+               if (WARN_ON(wsm_seq != priv->wsm_rx_seq))
+                       goto err;
+       }
+       priv->wsm_rx_seq = (wsm_seq + 1) & 7;
+       rx_resync = 0;
+
+       if (wsm_id & 0x0400) {
+               int rc = wsm_release_tx_buffer(priv, 1);
+               if (WARN_ON(rc < 0))
+                       return rc;
+               else if (rc > 0)
+                       *tx = 1;
+       }
+
+       /* cw1200_wsm_rx takes care of the SKB lifetime */
+       if (WARN_ON(wsm_handle_rx(priv, wsm_id, wsm, &skb_rx)))
+               goto err;
+
+       if (skb_rx) {
+               dev_kfree_skb(skb_rx);
+               skb_rx = NULL;
+       }
+
+       return 0;
+
+err:
+       if (skb_rx) {
+               dev_kfree_skb(skb_rx);
+               skb_rx = NULL;
+       }
+       return -1;
+}
+
+static int cw1200_bh_tx_helper(struct cw1200_common *priv,
+                              int *pending_tx,
+                              int *tx_burst)
+{
+       size_t tx_len;
+       u8 *data;
+       int ret;
+       struct wsm_hdr *wsm;
+
+       if (priv->device_can_sleep) {
+               ret = cw1200_device_wakeup(priv);
+               if (WARN_ON(ret < 0)) { /* Error in wakeup */
+                       *pending_tx = 1;
+                       return 0;
+               } else if (ret) { /* Woke up */
+                       priv->device_can_sleep = false;
+               } else { /* Did not awake */
+                       *pending_tx = 1;
+                       return 0;
+               }
+       }
+
+       wsm_alloc_tx_buffer(priv);
+       ret = wsm_get_tx(priv, &data, &tx_len, tx_burst);
+       if (ret <= 0) {
+               wsm_release_tx_buffer(priv, 1);
+               if (WARN_ON(ret < 0))
+                       return ret; /* Error */
+               return 0; /* No work */
+       }
+
+       wsm = (struct wsm_hdr *)data;
+       BUG_ON(tx_len < sizeof(*wsm));
+       BUG_ON(__le16_to_cpu(wsm->len) != tx_len);
+
+       atomic_add(1, &priv->bh_tx);
+
+       tx_len = priv->hwbus_ops->align_size(
+               priv->hwbus_priv, tx_len);
+
+       /* Check if not exceeding CW1200 capabilities */
+       if (WARN_ON_ONCE(tx_len > EFFECTIVE_BUF_SIZE))
+               pr_debug("Write aligned len: %zu\n", tx_len);
+
+       wsm->id &= __cpu_to_le16(0xffff ^ WSM_TX_SEQ(WSM_TX_SEQ_MAX));
+       wsm->id |= __cpu_to_le16(WSM_TX_SEQ(priv->wsm_tx_seq));
+
+       if (WARN_ON(cw1200_data_write(priv, data, tx_len))) {
+               pr_err("tx blew up, len %zu\n", tx_len);
+               wsm_release_tx_buffer(priv, 1);
+               return -1; /* Error */
+       }
+
+       if (priv->wsm_enable_wsm_dumps)
+               print_hex_dump_bytes("--> ",
+                                    DUMP_PREFIX_NONE,
+                                    data,
+                                    __le16_to_cpu(wsm->len));
+
+       wsm_txed(priv, data);
+       priv->wsm_tx_seq = (priv->wsm_tx_seq + 1) & WSM_TX_SEQ_MAX;
+
+       if (*tx_burst > 1) {
+               cw1200_debug_tx_burst(priv);
+               return 1; /* Work remains */
+       }
+
+       return 0;
+}
+
+static int cw1200_bh(void *arg)
+{
+       struct cw1200_common *priv = arg;
+       int rx, tx, term, suspend;
+       u16 ctrl_reg = 0;
+       int tx_allowed;
+       int pending_tx = 0;
+       int tx_burst;
+       long status;
+       u32 dummy;
+       int ret;
+
+       for (;;) {
+               if (!priv->hw_bufs_used &&
+                   priv->powersave_enabled &&
+                   !priv->device_can_sleep &&
+                   !atomic_read(&priv->recent_scan)) {
+                       status = 1 * HZ;
+                       pr_debug("[BH] Device wakedown. No data.\n");
+                       cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID, 0);
+                       priv->device_can_sleep = true;
+               } else if (priv->hw_bufs_used) {
+                       /* Interrupt loss detection */
+                       status = 1 * HZ;
+               } else {
+                       status = MAX_SCHEDULE_TIMEOUT;
+               }
+
+               /* Dummy read for SDIO retry mechanism */
+               if ((priv->hw_type != -1) &&
+                   (atomic_read(&priv->bh_rx) == 0) &&
+                   (atomic_read(&priv->bh_tx) == 0))
+                       cw1200_reg_read(priv, ST90TDS_CONFIG_REG_ID,
+                                       &dummy, sizeof(dummy));
+
+               pr_debug("[BH] waiting ...\n");
+               status = wait_event_interruptible_timeout(priv->bh_wq, ({
+                               rx = atomic_xchg(&priv->bh_rx, 0);
+                               tx = atomic_xchg(&priv->bh_tx, 0);
+                               term = atomic_xchg(&priv->bh_term, 0);
+                               suspend = pending_tx ?
+                                       0 : atomic_read(&priv->bh_suspend);
+                               (rx || tx || term || suspend || priv->bh_error);
+                       }), status);
+
+               pr_debug("[BH] - rx: %d, tx: %d, term: %d, suspend: %d, status: %ld\n",
+                        rx, tx, term, suspend, status);
+
+               /* Did an error occur? */
+               if ((status < 0 && status != -ERESTARTSYS) ||
+                   term || priv->bh_error) {
+                       break;
+               }
+               if (!status) {  /* wait_event timed out */
+                       unsigned long timestamp = jiffies;
+                       long timeout;
+                       int pending = 0;
+                       int i;
+
+                       /* Check to see if we have any outstanding frames */
+                       if (priv->hw_bufs_used && (!rx || !tx)) {
+                               wiphy_warn(priv->hw->wiphy,
+                                          "Missed interrupt? (%d frames outstanding)\n",
+                                          priv->hw_bufs_used);
+                               rx = 1;
+
+                               /* Get a timestamp of "oldest" frame */
+                               for (i = 0; i < 4; ++i)
+                                       pending += cw1200_queue_get_xmit_timestamp(
+                                               &priv->tx_queue[i],
+                                               &timestamp,
+                                               priv->pending_frame_id);
+
+                               /* Check whether frame transmission has timed
+                                * out.  Add an extra second to allow for
+                                * possible interrupt loss.
+                                */
+                               timeout = timestamp +
+                                       WSM_CMD_LAST_CHANCE_TIMEOUT +
+                                       1 * HZ  -
+                                       jiffies;
+
+                               /* And terminate BH thread if the frame is "stuck" */
+                               if (pending && timeout < 0) {
+                                       wiphy_warn(priv->hw->wiphy,
+                                                  "Timeout waiting for TX confirm (%d/%d pending, %ld vs %lu).\n",
+                                                  priv->hw_bufs_used, pending,
+                                                  timestamp, jiffies);
+                                       break;
+                               }
+                       } else if (!priv->device_can_sleep &&
+                                  !atomic_read(&priv->recent_scan)) {
+                               pr_debug("[BH] Device wakedown. Timeout.\n");
+                               cw1200_reg_write_16(priv,
+                                                   ST90TDS_CONTROL_REG_ID, 0);
+                               priv->device_can_sleep = true;
+                       }
+                       goto done;
+               } else if (suspend) {
+                       pr_debug("[BH] Device suspend.\n");
+                       if (priv->powersave_enabled) {
+                               pr_debug("[BH] Device wakedown. Suspend.\n");
+                               cw1200_reg_write_16(priv,
+                                                   ST90TDS_CONTROL_REG_ID, 0);
+                               priv->device_can_sleep = true;
+                       }
+
+                       atomic_set(&priv->bh_suspend, CW1200_BH_SUSPENDED);
+                       wake_up(&priv->bh_evt_wq);
+                       status = wait_event_interruptible(priv->bh_wq,
+                                                         CW1200_BH_RESUME == atomic_read(&priv->bh_suspend));
+                       if (status < 0) {
+                               wiphy_err(priv->hw->wiphy,
+                                         "Failed to wait for resume: %ld.\n",
+                                         status);
+                               break;
+                       }
+                       pr_debug("[BH] Device resume.\n");
+                       atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED);
+                       wake_up(&priv->bh_evt_wq);
+                       atomic_add(1, &priv->bh_rx);
+                       goto done;
+               }
+
+       rx:
+               tx += pending_tx;
+               pending_tx = 0;
+
+               if (cw1200_bh_read_ctrl_reg(priv, &ctrl_reg))
+                       break;
+
+               /* Don't bother trying to rx unless we have data to read */
+               if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) {
+                       ret = cw1200_bh_rx_helper(priv, &ctrl_reg, &tx);
+                       if (ret < 0)
+                               break;
+                       /* Double up here if there's more data.. */
+                       if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) {
+                               ret = cw1200_bh_rx_helper(priv, &ctrl_reg, &tx);
+                               if (ret < 0)
+                                       break;
+                       }
+               }
+
+       tx:
+               if (tx) {
+                       tx = 0;
+
+                       BUG_ON(priv->hw_bufs_used > priv->wsm_caps.input_buffers);
+                       tx_burst = priv->wsm_caps.input_buffers - priv->hw_bufs_used;
+                       tx_allowed = tx_burst > 0;
+
+                       if (!tx_allowed) {
+                               /* Buffers full.  Ensure we process tx
+                                * after we handle rx..
+                                */
+                               pending_tx = tx;
+                               goto done_rx;
+                       }
+                       ret = cw1200_bh_tx_helper(priv, &pending_tx, &tx_burst);
+                       if (ret < 0)
+                               break;
+                       if (ret > 0) /* More to transmit */
+                               tx = ret;
+
+                       /* Re-read ctrl reg */
+                       if (cw1200_bh_read_ctrl_reg(priv, &ctrl_reg))
+                               break;
+               }
+
+       done_rx:
+               if (priv->bh_error)
+                       break;
+               if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK)
+                       goto rx;
+               if (tx)
+                       goto tx;
+
+       done:
+               /* Re-enable device interrupts */
+               priv->hwbus_ops->lock(priv->hwbus_priv);
+               __cw1200_irq_enable(priv, 1);
+               priv->hwbus_ops->unlock(priv->hwbus_priv);
+       }
+
+       /* Explicitly disable device interrupts */
+       priv->hwbus_ops->lock(priv->hwbus_priv);
+       __cw1200_irq_enable(priv, 0);
+       priv->hwbus_ops->unlock(priv->hwbus_priv);
+
+       if (!term) {
+               pr_err("[BH] Fatal error, exiting.\n");
+               priv->bh_error = 1;
+               /* TODO: schedule_work(recovery) */
+       }
+       return 0;
+}
diff --git a/drivers/net/wireless/cw1200/bh.h b/drivers/net/wireless/cw1200/bh.h
new file mode 100644 (file)
index 0000000..af6a485
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Device handling thread interface for mac80211 ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef CW1200_BH_H
+#define CW1200_BH_H
+
+/* extern */ struct cw1200_common;
+
+int cw1200_register_bh(struct cw1200_common *priv);
+void cw1200_unregister_bh(struct cw1200_common *priv);
+void cw1200_irq_handler(struct cw1200_common *priv);
+void cw1200_bh_wakeup(struct cw1200_common *priv);
+int cw1200_bh_suspend(struct cw1200_common *priv);
+int cw1200_bh_resume(struct cw1200_common *priv);
+/* Must be called from BH thread. */
+void cw1200_enable_powersave(struct cw1200_common *priv,
+                            bool enable);
+int wsm_release_tx_buffer(struct cw1200_common *priv, int count);
+
+#endif /* CW1200_BH_H */
diff --git a/drivers/net/wireless/cw1200/cw1200.h b/drivers/net/wireless/cw1200/cw1200.h
new file mode 100644 (file)
index 0000000..1ad7d36
--- /dev/null
@@ -0,0 +1,323 @@
+/*
+ * Common private data for ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * Based on the mac80211 Prism54 code, which is
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ *
+ * Based on the islsm (softmac prism54) driver, which is:
+ * Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef CW1200_H
+#define CW1200_H
+
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <net/mac80211.h>
+
+#include "queue.h"
+#include "wsm.h"
+#include "scan.h"
+#include "txrx.h"
+#include "pm.h"
+
+/* Forward declarations */
+struct hwbus_ops;
+struct task_struct;
+struct cw1200_debug_priv;
+struct firmware;
+
+#define CW1200_MAX_CTRL_FRAME_LEN      (0x1000)
+
+#define CW1200_MAX_STA_IN_AP_MODE      (5)
+#define CW1200_LINK_ID_AFTER_DTIM      (CW1200_MAX_STA_IN_AP_MODE + 1)
+#define CW1200_LINK_ID_UAPSD           (CW1200_MAX_STA_IN_AP_MODE + 2)
+#define CW1200_LINK_ID_MAX             (CW1200_MAX_STA_IN_AP_MODE + 3)
+#define CW1200_MAX_REQUEUE_ATTEMPTS    (5)
+
+#define CW1200_MAX_TID                 (8)
+
+#define CW1200_BLOCK_ACK_CNT           (30)
+#define CW1200_BLOCK_ACK_THLD          (800)
+#define CW1200_BLOCK_ACK_HIST          (3)
+#define CW1200_BLOCK_ACK_INTERVAL      (1 * HZ / CW1200_BLOCK_ACK_HIST)
+
+#define CW1200_JOIN_TIMEOUT            (1 * HZ)
+#define CW1200_AUTH_TIMEOUT            (5 * HZ)
+
+struct cw1200_ht_info {
+       struct ieee80211_sta_ht_cap     ht_cap;
+       enum nl80211_channel_type       channel_type;
+       u16                             operation_mode;
+};
+
+/* Please keep order */
+enum cw1200_join_status {
+       CW1200_JOIN_STATUS_PASSIVE = 0,
+       CW1200_JOIN_STATUS_MONITOR,
+       CW1200_JOIN_STATUS_JOINING,
+       CW1200_JOIN_STATUS_PRE_STA,
+       CW1200_JOIN_STATUS_STA,
+       CW1200_JOIN_STATUS_IBSS,
+       CW1200_JOIN_STATUS_AP,
+};
+
+enum cw1200_link_status {
+       CW1200_LINK_OFF,
+       CW1200_LINK_RESERVE,
+       CW1200_LINK_SOFT,
+       CW1200_LINK_HARD,
+       CW1200_LINK_RESET,
+       CW1200_LINK_RESET_REMAP,
+};
+
+extern int cw1200_power_mode;
+extern const char * const cw1200_fw_types[];
+
+struct cw1200_link_entry {
+       unsigned long                   timestamp;
+       enum cw1200_link_status         status;
+       enum cw1200_link_status         prev_status;
+       u8                              mac[ETH_ALEN];
+       u8                              buffered[CW1200_MAX_TID];
+       struct sk_buff_head             rx_queue;
+};
+
+struct cw1200_common {
+       /* interfaces to the rest of the stack */
+       struct ieee80211_hw             *hw;
+       struct ieee80211_vif            *vif;
+       struct device                   *pdev;
+
+       /* Statistics */
+       struct ieee80211_low_level_stats stats;
+
+       /* Our macaddr */
+       u8 mac_addr[ETH_ALEN];
+
+       /* Hardware interface */
+       const struct hwbus_ops          *hwbus_ops;
+       struct hwbus_priv               *hwbus_priv;
+
+       /* Hardware information */
+       enum {
+               HIF_9000_SILICON_VERSATILE = 0,
+               HIF_8601_VERSATILE,
+               HIF_8601_SILICON,
+       } hw_type;
+       enum {
+               CW1200_HW_REV_CUT10 = 10,
+               CW1200_HW_REV_CUT11 = 11,
+               CW1200_HW_REV_CUT20 = 20,
+               CW1200_HW_REV_CUT22 = 22,
+               CW1X60_HW_REV       = 40,
+       } hw_revision;
+       int                             hw_refclk;
+       bool                            hw_have_5ghz;
+       const struct firmware           *sdd;
+       char                            *sdd_path;
+
+       struct cw1200_debug_priv        *debug;
+
+       struct workqueue_struct         *workqueue;
+       struct mutex                    conf_mutex;
+
+       struct cw1200_queue             tx_queue[4];
+       struct cw1200_queue_stats       tx_queue_stats;
+       int                             tx_burst_idx;
+
+       /* firmware/hardware info */
+       unsigned int tx_hdr_len;
+
+       /* Radio data */
+       int output_power;
+
+       /* BBP/MAC state */
+       struct ieee80211_rate           *rates;
+       struct ieee80211_rate           *mcs_rates;
+       struct ieee80211_channel        *channel;
+       struct wsm_edca_params          edca;
+       struct wsm_tx_queue_params      tx_queue_params;
+       struct wsm_mib_association_mode association_mode;
+       struct wsm_set_bss_params       bss_params;
+       struct cw1200_ht_info           ht_info;
+       struct wsm_set_pm               powersave_mode;
+       struct wsm_set_pm               firmware_ps_mode;
+       int                             cqm_rssi_thold;
+       unsigned                        cqm_rssi_hyst;
+       bool                            cqm_use_rssi;
+       int                             cqm_beacon_loss_count;
+       int                             channel_switch_in_progress;
+       wait_queue_head_t               channel_switch_done;
+       u8                              long_frame_max_tx_count;
+       u8                              short_frame_max_tx_count;
+       int                             mode;
+       bool                            enable_beacon;
+       int                             beacon_int;
+       bool                            listening;
+       struct wsm_rx_filter            rx_filter;
+       struct wsm_mib_multicast_filter multicast_filter;
+       bool                            has_multicast_subscription;
+       bool                            disable_beacon_filter;
+       struct work_struct              update_filtering_work;
+       struct work_struct              set_beacon_wakeup_period_work;
+
+       u8                              ba_rx_tid_mask;
+       u8                              ba_tx_tid_mask;
+
+       struct cw1200_pm_state          pm_state;
+
+       struct wsm_p2p_ps_modeinfo      p2p_ps_modeinfo;
+       struct wsm_uapsd_info           uapsd_info;
+       bool                            setbssparams_done;
+       bool                            bt_present;
+       u8                              conf_listen_interval;
+       u32                             listen_interval;
+       u32                             erp_info;
+       u32                             rts_threshold;
+
+       /* BH */
+       atomic_t                        bh_rx;
+       atomic_t                        bh_tx;
+       atomic_t                        bh_term;
+       atomic_t                        bh_suspend;
+
+       struct workqueue_struct         *bh_workqueue;
+       struct work_struct              bh_work;
+
+       int                             bh_error;
+       wait_queue_head_t               bh_wq;
+       wait_queue_head_t               bh_evt_wq;
+       u8                              buf_id_tx;
+       u8                              buf_id_rx;
+       u8                              wsm_rx_seq;
+       u8                              wsm_tx_seq;
+       int                             hw_bufs_used;
+       bool                            powersave_enabled;
+       bool                            device_can_sleep;
+
+       /* Scan status */
+       struct cw1200_scan scan;
+       /* Keep cw1200 awake (WUP = 1) 1 second after each scan to avoid
+        * FW issue with sleeping/waking up.
+        */
+       atomic_t                        recent_scan;
+       struct delayed_work             clear_recent_scan_work;
+
+       /* WSM */
+       struct wsm_startup_ind          wsm_caps;
+       struct mutex                    wsm_cmd_mux;
+       struct wsm_buf                  wsm_cmd_buf;
+       struct wsm_cmd                  wsm_cmd;
+       wait_queue_head_t               wsm_cmd_wq;
+       wait_queue_head_t               wsm_startup_done;
+       int                             firmware_ready;
+       atomic_t                        tx_lock;
+
+       /* WSM debug */
+       int                             wsm_enable_wsm_dumps;
+
+       /* WSM Join */
+       enum cw1200_join_status join_status;
+       u32                     pending_frame_id;
+       bool                    join_pending;
+       struct delayed_work     join_timeout;
+       struct work_struct      unjoin_work;
+       struct work_struct      join_complete_work;
+       int                     join_complete_status;
+       int                     join_dtim_period;
+       bool                    delayed_unjoin;
+
+       /* TX/RX and security */
+       s8                      wep_default_key_id;
+       struct work_struct      wep_key_work;
+       u32                     key_map;
+       struct wsm_add_key      keys[WSM_KEY_MAX_INDEX + 1];
+
+       /* AP powersave */
+       u32                     link_id_map;
+       struct cw1200_link_entry link_id_db[CW1200_MAX_STA_IN_AP_MODE];
+       struct work_struct      link_id_work;
+       struct delayed_work     link_id_gc_work;
+       u32                     sta_asleep_mask;
+       u32                     pspoll_mask;
+       bool                    aid0_bit_set;
+       spinlock_t              ps_state_lock; /* Protect power save state */
+       bool                    buffered_multicasts;
+       bool                    tx_multicast;
+       struct work_struct      set_tim_work;
+       struct work_struct      set_cts_work;
+       struct work_struct      multicast_start_work;
+       struct work_struct      multicast_stop_work;
+       struct timer_list       mcast_timeout;
+
+       /* WSM events and CQM implementation */
+       spinlock_t              event_queue_lock; /* Protect event queue */
+       struct list_head        event_queue;
+       struct work_struct      event_handler;
+
+       struct delayed_work     bss_loss_work;
+       spinlock_t              bss_loss_lock; /* Protect BSS loss state */
+       int                     bss_loss_state;
+       u32                     bss_loss_confirm_id;
+       int                     delayed_link_loss;
+       struct work_struct      bss_params_work;
+
+       /* TX rate policy cache */
+       struct tx_policy_cache tx_policy_cache;
+       struct work_struct tx_policy_upload_work;
+
+       /* legacy PS mode switch in suspend */
+       int                     ps_mode_switch_in_progress;
+       wait_queue_head_t       ps_mode_switch_done;
+
+       /* Workaround for WFD testcase 6.1.10 */
+       struct work_struct      linkid_reset_work;
+       u8                      action_frame_sa[ETH_ALEN];
+       u8                      action_linkid;
+};
+
+struct cw1200_sta_priv {
+       int link_id;
+};
+
+/* interfaces for the drivers */
+int cw1200_core_probe(const struct hwbus_ops *hwbus_ops,
+                     struct hwbus_priv *hwbus,
+                     struct device *pdev,
+                     struct cw1200_common **pself,
+                     int ref_clk, const u8 *macaddr,
+                     const char *sdd_path, bool have_5ghz);
+void cw1200_core_release(struct cw1200_common *self);
+
+#define FWLOAD_BLOCK_SIZE (1024)
+
+static inline int cw1200_is_ht(const struct cw1200_ht_info *ht_info)
+{
+       return ht_info->channel_type != NL80211_CHAN_NO_HT;
+}
+
+static inline int cw1200_ht_greenfield(const struct cw1200_ht_info *ht_info)
+{
+       return cw1200_is_ht(ht_info) &&
+               (ht_info->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
+               !(ht_info->operation_mode &
+                 IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+}
+
+static inline int cw1200_ht_ampdu_density(const struct cw1200_ht_info *ht_info)
+{
+       if (!cw1200_is_ht(ht_info))
+               return 0;
+       return ht_info->ht_cap.ampdu_density;
+}
+
+#endif /* CW1200_H */
diff --git a/drivers/net/wireless/cw1200/cw1200_sdio.c b/drivers/net/wireless/cw1200/cw1200_sdio.c
new file mode 100644 (file)
index 0000000..ebdcdf4
--- /dev/null
@@ -0,0 +1,425 @@
+/*
+ * Mac80211 SDIO driver for ST-Ericsson CW1200 device
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio.h>
+#include <net/mac80211.h>
+
+#include "cw1200.h"
+#include "hwbus.h"
+#include <linux/platform_data/net-cw1200.h>
+#include "hwio.h"
+
+MODULE_AUTHOR("Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>");
+MODULE_DESCRIPTION("mac80211 ST-Ericsson CW1200 SDIO driver");
+MODULE_LICENSE("GPL");
+
+#define SDIO_BLOCK_SIZE (512)
+
+/* Default platform data for Sagrad modules */
+static struct cw1200_platform_data_sdio sagrad_109x_evk_platform_data = {
+       .ref_clk = 38400,
+       .have_5ghz = false,
+       .sdd_file = "sdd_sagrad_1091_1098.bin",
+};
+
+/* Allow platform data to be overridden */
+static struct cw1200_platform_data_sdio *global_plat_data = &sagrad_109x_evk_platform_data;
+
+void __init cw1200_sdio_set_platform_data(struct cw1200_platform_data_sdio *pdata)
+{
+       global_plat_data = pdata;
+}
+
+struct hwbus_priv {
+       struct sdio_func        *func;
+       struct cw1200_common    *core;
+       const struct cw1200_platform_data_sdio *pdata;
+};
+
+#ifndef SDIO_VENDOR_ID_STE
+#define SDIO_VENDOR_ID_STE             0x0020
+#endif
+
+#ifndef SDIO_DEVICE_ID_STE_CW1200
+#define SDIO_DEVICE_ID_STE_CW1200      0x2280
+#endif
+
+static const struct sdio_device_id cw1200_sdio_ids[] = {
+       { SDIO_DEVICE(SDIO_VENDOR_ID_STE, SDIO_DEVICE_ID_STE_CW1200) },
+       { /* end: all zeroes */                 },
+};
+
+/* hwbus_ops implementation */
+
+static int cw1200_sdio_memcpy_fromio(struct hwbus_priv *self,
+                                    unsigned int addr,
+                                    void *dst, int count)
+{
+       return sdio_memcpy_fromio(self->func, dst, addr, count);
+}
+
+static int cw1200_sdio_memcpy_toio(struct hwbus_priv *self,
+                                  unsigned int addr,
+                                  const void *src, int count)
+{
+       return sdio_memcpy_toio(self->func, addr, (void *)src, count);
+}
+
+static void cw1200_sdio_lock(struct hwbus_priv *self)
+{
+       sdio_claim_host(self->func);
+}
+
+static void cw1200_sdio_unlock(struct hwbus_priv *self)
+{
+       sdio_release_host(self->func);
+}
+
+static void cw1200_sdio_irq_handler(struct sdio_func *func)
+{
+       struct hwbus_priv *self = sdio_get_drvdata(func);
+
+       /* Note: the SDIO host is already claimed here. */
+       if (self->core)
+               cw1200_irq_handler(self->core);
+}
+
+static irqreturn_t cw1200_gpio_hardirq(int irq, void *dev_id)
+{
+       return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t cw1200_gpio_irq(int irq, void *dev_id)
+{
+       struct hwbus_priv *self = dev_id;
+
+       if (self->core) {
+               sdio_claim_host(self->func);
+               cw1200_irq_handler(self->core);
+               sdio_release_host(self->func);
+               return IRQ_HANDLED;
+       } else {
+               return IRQ_NONE;
+       }
+}
+
+static int cw1200_request_irq(struct hwbus_priv *self)
+{
+       int ret;
+       u8 cccr;
+
+       cccr = sdio_f0_readb(self->func, SDIO_CCCR_IENx, &ret);
+       if (WARN_ON(ret))
+               goto err;
+
+       /* Master interrupt enable ... */
+       cccr |= BIT(0);
+
+       /* ... for our function */
+       cccr |= BIT(self->func->num);
+
+       sdio_f0_writeb(self->func, cccr, SDIO_CCCR_IENx, &ret);
+       if (WARN_ON(ret))
+               goto err;
+
+       ret = enable_irq_wake(self->pdata->irq);
+       if (WARN_ON(ret))
+               goto err;
+
+       /* Request the IRQ */
+       ret =  request_threaded_irq(self->pdata->irq, cw1200_gpio_hardirq,
+                                   cw1200_gpio_irq,
+                                   IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+                                   "cw1200_wlan_irq", self);
+       if (WARN_ON(ret))
+               goto err;
+
+       return 0;
+
+err:
+       return ret;
+}
+
+static int cw1200_sdio_irq_subscribe(struct hwbus_priv *self)
+{
+       int ret = 0;
+
+       pr_debug("SW IRQ subscribe\n");
+       sdio_claim_host(self->func);
+       if (self->pdata->irq)
+               ret = cw1200_request_irq(self);
+       else
+               ret = sdio_claim_irq(self->func, cw1200_sdio_irq_handler);
+
+       sdio_release_host(self->func);
+       return ret;
+}
+
+static int cw1200_sdio_irq_unsubscribe(struct hwbus_priv *self)
+{
+       int ret = 0;
+
+       pr_debug("SW IRQ unsubscribe\n");
+
+       if (self->pdata->irq) {
+               disable_irq_wake(self->pdata->irq);
+               free_irq(self->pdata->irq, self);
+       } else {
+               sdio_claim_host(self->func);
+               ret = sdio_release_irq(self->func);
+               sdio_release_host(self->func);
+       }
+       return ret;
+}
+
+static int cw1200_sdio_off(const struct cw1200_platform_data_sdio *pdata)
+{
+       if (pdata->reset) {
+               gpio_set_value(pdata->reset, 0);
+               msleep(30); /* Min is 2 * CLK32K cycles */
+               gpio_free(pdata->reset);
+       }
+
+       if (pdata->power_ctrl)
+               pdata->power_ctrl(pdata, false);
+       if (pdata->clk_ctrl)
+               pdata->clk_ctrl(pdata, false);
+
+       return 0;
+}
+
+static int cw1200_sdio_on(const struct cw1200_platform_data_sdio *pdata)
+{
+       /* Ensure I/Os are pulled low */
+       if (pdata->reset) {
+               gpio_request(pdata->reset, "cw1200_wlan_reset");
+               gpio_direction_output(pdata->reset, 0);
+       }
+       if (pdata->powerup) {
+               gpio_request(pdata->powerup, "cw1200_wlan_powerup");
+               gpio_direction_output(pdata->powerup, 0);
+       }
+       if (pdata->reset || pdata->powerup)
+               msleep(10); /* Settle time? */
+
+       /* Enable 3v3 and 1v8 to hardware */
+       if (pdata->power_ctrl) {
+               if (pdata->power_ctrl(pdata, true)) {
+                       pr_err("power_ctrl() failed!\n");
+                       return -1;
+               }
+       }
+
+       /* Enable CLK32K */
+       if (pdata->clk_ctrl) {
+               if (pdata->clk_ctrl(pdata, true)) {
+                       pr_err("clk_ctrl() failed!\n");
+                       return -1;
+               }
+               msleep(10); /* Delay until clock is stable for 2 cycles */
+       }
+
+       /* Enable POWERUP signal */
+       if (pdata->powerup) {
+               gpio_set_value(pdata->powerup, 1);
+               msleep(250); /* or more..? */
+       }
+       /* Enable RSTn signal */
+       if (pdata->reset) {
+               gpio_set_value(pdata->reset, 1);
+               msleep(50); /* Or more..? */
+       }
+       return 0;
+}
+
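+/* Buffer size alignment for the bus: if the platform data sets no_nptb,
+ * round up to a full SDIO block, otherwise let sdio_align_size() pick it.
+ */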
+static size_t cw1200_sdio_align_size(struct hwbus_priv *self, size_t size)
+{
+       if (self->pdata->no_nptb)
+               size = round_up(size, SDIO_BLOCK_SIZE);
+       else
+               size = sdio_align_size(self->func, size);
+
+       return size;
+}
+
+static int cw1200_sdio_pm(struct hwbus_priv *self, bool suspend)
+{
+       int ret = 0;
+
+       if (self->pdata->irq)
+               ret = irq_set_irq_wake(self->pdata->irq, suspend);
+       return ret;
+}
+
+static struct hwbus_ops cw1200_sdio_hwbus_ops = {
+       .hwbus_memcpy_fromio    = cw1200_sdio_memcpy_fromio,
+       .hwbus_memcpy_toio      = cw1200_sdio_memcpy_toio,
+       .lock                   = cw1200_sdio_lock,
+       .unlock                 = cw1200_sdio_unlock,
+       .align_size             = cw1200_sdio_align_size,
+       .power_mgmt             = cw1200_sdio_pm,
+};
+
+/* Probe Function to be called by SDIO stack when device is discovered */
+static int cw1200_sdio_probe(struct sdio_func *func,
+                            const struct sdio_device_id *id)
+{
+       struct hwbus_priv *self;
+       int status;
+
+       pr_info("cw1200_wlan_sdio: Probe called\n");
+
+       /* We are only able to handle the wlan function */
+       if (func->num != 0x01)
+               return -ENODEV;
+
+       self = kzalloc(sizeof(*self), GFP_KERNEL);
+       if (!self) {
+               pr_err("Can't allocate SDIO hwbus_priv.\n");
+               return -ENOMEM;
+       }
+
+       func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
+
+       self->pdata = global_plat_data; /* FIXME */
+       self->func = func;
+       sdio_set_drvdata(func, self);
+       sdio_claim_host(func);
+       sdio_enable_func(func);
+       sdio_release_host(func);
+
+       status = cw1200_sdio_irq_subscribe(self);
+
+       status = cw1200_core_probe(&cw1200_sdio_hwbus_ops,
+                                  self, &func->dev, &self->core,
+                                  self->pdata->ref_clk,
+                                  self->pdata->macaddr,
+                                  self->pdata->sdd_file,
+                                  self->pdata->have_5ghz);
+       if (status) {
+               cw1200_sdio_irq_unsubscribe(self);
+               sdio_claim_host(func);
+               sdio_disable_func(func);
+               sdio_release_host(func);
+               sdio_set_drvdata(func, NULL);
+               kfree(self);
+       }
+
+       return status;
+}
+
+/* Disconnect Function to be called by SDIO stack when
+ * device is disconnected
+ */
+static void cw1200_sdio_disconnect(struct sdio_func *func)
+{
+       struct hwbus_priv *self = sdio_get_drvdata(func);
+
+       if (self) {
+               cw1200_sdio_irq_unsubscribe(self);
+               if (self->core) {
+                       cw1200_core_release(self->core);
+                       self->core = NULL;
+               }
+               sdio_claim_host(func);
+               sdio_disable_func(func);
+               sdio_release_host(func);
+               sdio_set_drvdata(func, NULL);
+               kfree(self);
+       }
+}
+
+#ifdef CONFIG_PM
+static int cw1200_sdio_suspend(struct device *dev)
+{
+       int ret;
+       struct sdio_func *func = dev_to_sdio_func(dev);
+       struct hwbus_priv *self = sdio_get_drvdata(func);
+
+       if (!cw1200_can_suspend(self->core))
+               return -EAGAIN;
+
+       /* Notify SDIO that CW1200 will remain powered during suspend */
+       ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+       if (ret)
+               pr_err("Error setting SDIO pm flags: %i\n", ret);
+
+       return ret;
+}
+
+static int cw1200_sdio_resume(struct device *dev)
+{
+       return 0;
+}
+
+static const struct dev_pm_ops cw1200_pm_ops = {
+       .suspend = cw1200_sdio_suspend,
+       .resume = cw1200_sdio_resume,
+};
+#endif
+
+static struct sdio_driver sdio_driver = {
+       .name           = "cw1200_wlan_sdio",
+       .id_table       = cw1200_sdio_ids,
+       .probe          = cw1200_sdio_probe,
+       .remove         = cw1200_sdio_disconnect,
+#ifdef CONFIG_PM
+       .drv = {
+               .pm = &cw1200_pm_ops,
+       }
+#endif
+};
+
+/* Module init function, called by insmod */
+static int __init cw1200_sdio_init(void)
+{
+       const struct cw1200_platform_data_sdio *pdata;
+       int ret;
+
+       /* FIXME -- this won't support multiple devices */
+       pdata = global_plat_data;
+
+       if (cw1200_sdio_on(pdata)) {
+               ret = -1;
+               goto err;
+       }
+
+       ret = sdio_register_driver(&sdio_driver);
+       if (ret)
+               goto err;
+
+       return 0;
+
+err:
+       cw1200_sdio_off(pdata);
+       return ret;
+}
+
+/* Module exit function, called at driver unload */
+static void __exit cw1200_sdio_exit(void)
+{
+       const struct cw1200_platform_data_sdio *pdata;
+
+       /* FIXME -- this won't support multiple devices */
+       pdata = global_plat_data;
+       sdio_unregister_driver(&sdio_driver);
+       cw1200_sdio_off(pdata);
+}
+
+module_init(cw1200_sdio_init);
+module_exit(cw1200_sdio_exit);
diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c
new file mode 100644 (file)
index 0000000..d063760
--- /dev/null
@@ -0,0 +1,471 @@
+/*
+ * Mac80211 SPI driver for ST-Ericsson CW1200 device
+ *
+ * Copyright (c) 2011, Sagrad Inc.
+ * Author:  Solomon Peachy <speachy@sagrad.com>
+ *
+ * Based on cw1200_sdio.c
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <net/mac80211.h>
+
+#include <linux/spi/spi.h>
+#include <linux/device.h>
+
+#include "cw1200.h"
+#include "hwbus.h"
+#include <linux/platform_data/net-cw1200.h>
+#include "hwio.h"
+
+MODULE_AUTHOR("Solomon Peachy <speachy@sagrad.com>");
+MODULE_DESCRIPTION("mac80211 ST-Ericsson CW1200 SPI driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:cw1200_wlan_spi");
+
+/* #define SPI_DEBUG */
+
+struct hwbus_priv {
+       struct spi_device       *func;
+       struct cw1200_common    *core;
+       const struct cw1200_platform_data_spi *pdata;
+       spinlock_t              lock; /* Serialize all bus operations */
+       int claimed;
+};
+
+#define SDIO_TO_SPI_ADDR(addr) ((addr & 0x1f)>>2)
+#define SET_WRITE 0x7FFF /* usage: and operation */
+#define SET_READ 0x8000  /* usage: or operation */
+
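+/* The 16-bit transfer header assembled by the fromio/toio helpers below
+ * is laid out as:
+ *   bit  15    - 1 = read, 0 = write
+ *   bits 14:12 - register address (SDIO_TO_SPI_ADDR)
+ *   bits 11:0  - transfer length in 16-bit words
+ */
+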
+/* Notes on byte ordering:
+ *   LE:  B0 B1 B2 B3
+ *   BE:  B3 B2 B1 B0
+ *
+ * Hardware expects 32-bit data to be written as 16-bit BE words:
+ *
+ *   B1 B0 B3 B2
+ */
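+
+/* Worked example (illustrative): the host value 0xAABBCCDD is stored
+ * little-endian in memory as DD CC BB AA; swab16() on each 16-bit half
+ * sends CC DD AA BB on the wire, i.e. the B1 B0 B3 B2 layout above.
+ */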
+
+static int cw1200_spi_memcpy_fromio(struct hwbus_priv *self,
+                                    unsigned int addr,
+                                    void *dst, int count)
+{
+       int ret, i;
+       u16 regaddr;
+       struct spi_message      m;
+
+       struct spi_transfer     t_addr = {
+               .tx_buf         = &regaddr,
+               .len            = sizeof(regaddr),
+       };
+       struct spi_transfer     t_msg = {
+               .rx_buf         = dst,
+               .len            = count,
+       };
+
+       regaddr = (SDIO_TO_SPI_ADDR(addr))<<12;
+       regaddr |= SET_READ;
+       regaddr |= (count>>1);
+
+#ifdef SPI_DEBUG
+       pr_info("READ : %04d from 0x%02x (%04x)\n", count, addr, regaddr);
+#endif
+
+       /* Header is LE16 */
+       regaddr = cpu_to_le16(regaddr);
+
+       /* We have to byteswap if the SPI bus is limited to 8-bit operation
+        * or we are running on a big-endian system.
+        */
+#if defined(__LITTLE_ENDIAN)
+       if (self->func->bits_per_word == 8)
+#endif
+               regaddr = swab16(regaddr);
+
+       spi_message_init(&m);
+       spi_message_add_tail(&t_addr, &m);
+       spi_message_add_tail(&t_msg, &m);
+       ret = spi_sync(self->func, &m);
+
+#ifdef SPI_DEBUG
+       pr_info("READ : ");
+       for (i = 0; i < t_addr.len; i++)
+               printk("%02x ", ((u8 *)t_addr.tx_buf)[i]);
+       printk(" : ");
+       for (i = 0; i < t_msg.len; i++)
+               printk("%02x ", ((u8 *)t_msg.rx_buf)[i]);
+       printk("\n");
+#endif
+
+       /* We have to byteswap if the SPI bus is limited to 8-bit operation
+        * or we are running on a big-endian system.
+        */
+#if defined(__LITTLE_ENDIAN)
+       if (self->func->bits_per_word == 8)
+#endif
+       {
+               uint16_t *buf = (uint16_t *)dst;
+               for (i = 0; i < ((count + 1) >> 1); i++)
+                       buf[i] = swab16(buf[i]);
+       }
+
+       return ret;
+}
+
+static int cw1200_spi_memcpy_toio(struct hwbus_priv *self,
+                                  unsigned int addr,
+                                  const void *src, int count)
+{
+       int rval, i;
+       u16 regaddr;
+       struct spi_transfer     t_addr = {
+               .tx_buf         = &regaddr,
+               .len            = sizeof(regaddr),
+       };
+       struct spi_transfer     t_msg = {
+               .tx_buf         = src,
+               .len            = count,
+       };
+       struct spi_message      m;
+
+       regaddr = (SDIO_TO_SPI_ADDR(addr))<<12;
+       regaddr &= SET_WRITE;
+       regaddr |= (count>>1);
+
+#ifdef SPI_DEBUG
+       pr_info("WRITE: %04d  to  0x%02x (%04x)\n", count, addr, regaddr);
+#endif
+
+       /* Header is LE16 */
+       regaddr = cpu_to_le16(regaddr);
+
+       /* We have to byteswap if the SPI bus is limited to 8-bit operation
+        * or we are running on a big-endian system.
+        */
+#if defined(__LITTLE_ENDIAN)
+       if (self->func->bits_per_word == 8)
+#endif
+       {
+               uint16_t *buf = (uint16_t *)src;
+               regaddr = swab16(regaddr);
+               for (i = 0; i < ((count + 1) >> 1); i++)
+                       buf[i] = swab16(buf[i]);
+       }
+
+#ifdef SPI_DEBUG
+       pr_info("WRITE: ");
+       for (i = 0; i < t_addr.len; i++)
+               printk("%02x ", ((u8 *)t_addr.tx_buf)[i]);
+       printk(" : ");
+       for (i = 0; i < t_msg.len; i++)
+               printk("%02x ", ((u8 *)t_msg.tx_buf)[i]);
+       printk("\n");
+#endif
+
+       spi_message_init(&m);
+       spi_message_add_tail(&t_addr, &m);
+       spi_message_add_tail(&t_msg, &m);
+       rval = spi_sync(self->func, &m);
+
+#ifdef SPI_DEBUG
+       pr_info("WROTE: %d\n", m.actual_length);
+#endif
+
+#if defined(__LITTLE_ENDIAN)
+       /* We have to byteswap if the SPI bus is limited to 8b operation */
+       if (self->func->bits_per_word == 8)
+#endif
+       {
+               uint16_t *buf = (uint16_t *)src;
+               for (i = 0; i < ((count + 1) >> 1); i++)
+                       buf[i] = swab16(buf[i]);
+       }
+       return rval;
+}
+
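+/* Simple claim/release scheme serializing bus access from the driver,
+ * analogous to sdio_claim_host()/sdio_release_host(): sleep until the
+ * bus is unclaimed, then mark it claimed.
+ */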
+static void cw1200_spi_lock(struct hwbus_priv *self)
+{
+       unsigned long flags;
+
+       might_sleep();
+
+       spin_lock_irqsave(&self->lock, flags);
+       while (1) {
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               if (!self->claimed)
+                       break;
+               spin_unlock_irqrestore(&self->lock, flags);
+               schedule();
+               spin_lock_irqsave(&self->lock, flags);
+       }
+       set_current_state(TASK_RUNNING);
+       self->claimed = 1;
+       spin_unlock_irqrestore(&self->lock, flags);
+
+       return;
+}
+
+static void cw1200_spi_unlock(struct hwbus_priv *self)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&self->lock, flags);
+       self->claimed = 0;
+       spin_unlock_irqrestore(&self->lock, flags);
+       return;
+}
+
+static irqreturn_t cw1200_spi_irq_handler(int irq, void *dev_id)
+{
+       struct hwbus_priv *self = dev_id;
+
+       if (self->core) {
+               cw1200_irq_handler(self->core);
+               return IRQ_HANDLED;
+       } else {
+               return IRQ_NONE;
+       }
+}
+
+static int cw1200_spi_irq_subscribe(struct hwbus_priv *self)
+{
+       int ret;
+
+       pr_debug("SW IRQ subscribe\n");
+
+       ret = request_any_context_irq(self->func->irq, cw1200_spi_irq_handler,
+                                     IRQF_TRIGGER_HIGH,
+                                     "cw1200_wlan_irq", self);
+       if (WARN_ON(ret < 0))
+               goto exit;
+
+       ret = enable_irq_wake(self->func->irq);
+       if (WARN_ON(ret))
+               goto free_irq;
+
+       return 0;
+
+free_irq:
+       free_irq(self->func->irq, self);
+exit:
+       return ret;
+}
+
+static int cw1200_spi_irq_unsubscribe(struct hwbus_priv *self)
+{
+       int ret = 0;
+
+       pr_debug("SW IRQ unsubscribe\n");
+       disable_irq_wake(self->func->irq);
+       free_irq(self->func->irq, self);
+
+       return ret;
+}
+
+static int cw1200_spi_off(const struct cw1200_platform_data_spi *pdata)
+{
+       if (pdata->reset) {
+               gpio_set_value(pdata->reset, 0);
+               msleep(30); /* Min is 2 * CLK32K cycles */
+               gpio_free(pdata->reset);
+       }
+
+       if (pdata->power_ctrl)
+               pdata->power_ctrl(pdata, false);
+       if (pdata->clk_ctrl)
+               pdata->clk_ctrl(pdata, false);
+
+       return 0;
+}
+
+static int cw1200_spi_on(const struct cw1200_platform_data_spi *pdata)
+{
+       /* Ensure I/Os are pulled low */
+       if (pdata->reset) {
+               gpio_request(pdata->reset, "cw1200_wlan_reset");
+               gpio_direction_output(pdata->reset, 0);
+       }
+       if (pdata->powerup) {
+               gpio_request(pdata->powerup, "cw1200_wlan_powerup");
+               gpio_direction_output(pdata->powerup, 0);
+       }
+       if (pdata->reset || pdata->powerup)
+               msleep(10); /* Settle time? */
+
+       /* Enable 3v3 and 1v8 to hardware */
+       if (pdata->power_ctrl) {
+               if (pdata->power_ctrl(pdata, true)) {
+                       pr_err("power_ctrl() failed!\n");
+                       return -1;
+               }
+       }
+
+       /* Enable CLK32K */
+       if (pdata->clk_ctrl) {
+               if (pdata->clk_ctrl(pdata, true)) {
+                       pr_err("clk_ctrl() failed!\n");
+                       return -1;
+               }
+               msleep(10); /* Delay until clock is stable for 2 cycles */
+       }
+
+       /* Enable POWERUP signal */
+       if (pdata->powerup) {
+               gpio_set_value(pdata->powerup, 1);
+               msleep(250); /* or more..? */
+       }
+       /* Enable RSTn signal */
+       if (pdata->reset) {
+               gpio_set_value(pdata->reset, 1);
+               msleep(50); /* Or more..? */
+       }
+       return 0;
+}
+
+static size_t cw1200_spi_align_size(struct hwbus_priv *self, size_t size)
+{
+       return size & 1 ? size + 1 : size;
+}
+
+static int cw1200_spi_pm(struct hwbus_priv *self, bool suspend)
+{
+       return irq_set_irq_wake(self->func->irq, suspend);
+}
+
+static struct hwbus_ops cw1200_spi_hwbus_ops = {
+       .hwbus_memcpy_fromio    = cw1200_spi_memcpy_fromio,
+       .hwbus_memcpy_toio      = cw1200_spi_memcpy_toio,
+       .lock                   = cw1200_spi_lock,
+       .unlock                 = cw1200_spi_unlock,
+       .align_size             = cw1200_spi_align_size,
+       .power_mgmt             = cw1200_spi_pm,
+};
+
+/* Probe Function to be called by SPI stack when device is discovered */
+static int cw1200_spi_probe(struct spi_device *func)
+{
+       const struct cw1200_platform_data_spi *plat_data =
+               func->dev.platform_data;
+       struct hwbus_priv *self;
+       int status;
+
+       /* Sanity check speed */
+       if (func->max_speed_hz > 52000000)
+               func->max_speed_hz = 52000000;
+       if (func->max_speed_hz < 1000000)
+               func->max_speed_hz = 1000000;
+
+       /* Fix up transfer size */
+       if (plat_data->spi_bits_per_word)
+               func->bits_per_word = plat_data->spi_bits_per_word;
+       if (!func->bits_per_word)
+               func->bits_per_word = 16;
+
+       /* And finally.. */
+       func->mode = SPI_MODE_0;
+
+       pr_info("cw1200_wlan_spi: Probe called (CS %d M %d BPW %d CLK %d)\n",
+               func->chip_select, func->mode, func->bits_per_word,
+               func->max_speed_hz);
+
+       if (cw1200_spi_on(plat_data)) {
+               pr_err("spi_on() failed!\n");
+               return -1;
+       }
+
+       if (spi_setup(func)) {
+               pr_err("spi_setup() failed!\n");
+               return -1;
+       }
+
+       self = kzalloc(sizeof(*self), GFP_KERNEL);
+       if (!self) {
+               pr_err("Can't allocate SPI hwbus_priv.\n");
+               return -ENOMEM;
+       }
+
+       self->pdata = plat_data;
+       self->func = func;
+       spin_lock_init(&self->lock);
+
+       spi_set_drvdata(func, self);
+
+       status = cw1200_spi_irq_subscribe(self);
+
+       status = cw1200_core_probe(&cw1200_spi_hwbus_ops,
+                                  self, &func->dev, &self->core,
+                                  self->pdata->ref_clk,
+                                  self->pdata->macaddr,
+                                  self->pdata->sdd_file,
+                                  self->pdata->have_5ghz);
+
+       if (status) {
+               cw1200_spi_irq_unsubscribe(self);
+               cw1200_spi_off(plat_data);
+               kfree(self);
+       }
+
+       return status;
+}
+
+/* Disconnect Function to be called by SPI stack when device is disconnected */
+static int cw1200_spi_disconnect(struct spi_device *func)
+{
+       struct hwbus_priv *self = spi_get_drvdata(func);
+
+       if (self) {
+               cw1200_spi_irq_unsubscribe(self);
+               if (self->core) {
+                       cw1200_core_release(self->core);
+                       self->core = NULL;
+               }
+               kfree(self);
+       }
+       cw1200_spi_off(func->dev.platform_data);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM
+static int cw1200_spi_suspend(struct device *dev, pm_message_t state)
+{
+       struct hwbus_priv *self = spi_get_drvdata(to_spi_device(dev));
+
+       if (!cw1200_can_suspend(self->core))
+               return -EAGAIN;
+
+       /* XXX notify host that we have to keep CW1200 powered on? */
+       return 0;
+}
+
+static int cw1200_spi_resume(struct device *dev)
+{
+       return 0;
+}
+#endif
+
+static struct spi_driver spi_driver = {
+       .probe          = cw1200_spi_probe,
+       .remove         = cw1200_spi_disconnect,
+       .driver = {
+               .name           = "cw1200_wlan_spi",
+               .bus            = &spi_bus_type,
+               .owner          = THIS_MODULE,
+#ifdef CONFIG_PM
+               .suspend        = cw1200_spi_suspend,
+               .resume         = cw1200_spi_resume,
+#endif
+       },
+};
+
+module_spi_driver(spi_driver);
diff --git a/drivers/net/wireless/cw1200/debug.c b/drivers/net/wireless/cw1200/debug.c
new file mode 100644 (file)
index 0000000..e323b4d
--- /dev/null
@@ -0,0 +1,428 @@
+/*
+ * DebugFS code for mac80211 ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include "cw1200.h"
+#include "debug.h"
+#include "fwio.h"
+
+/* join_status */
+static const char * const cw1200_debug_join_status[] = {
+       "passive",
+       "monitor",
+       "station (joining)",
+       "station (not authenticated yet)",
+       "station",
+       "adhoc",
+       "access point",
+};
+
+/* WSM_JOIN_PREAMBLE_... */
+static const char * const cw1200_debug_preamble[] = {
+       "long",
+       "short",
+       "long on 1 and 2 Mbps",
+};
+
+
+static const char * const cw1200_debug_link_id[] = {
+       "OFF",
+       "REQ",
+       "SOFT",
+       "HARD",
+};
+
+static const char *cw1200_debug_mode(int mode)
+{
+       switch (mode) {
+       case NL80211_IFTYPE_UNSPECIFIED:
+               return "unspecified";
+       case NL80211_IFTYPE_MONITOR:
+               return "monitor";
+       case NL80211_IFTYPE_STATION:
+               return "station";
+       case NL80211_IFTYPE_ADHOC:
+               return "adhoc";
+       case NL80211_IFTYPE_MESH_POINT:
+               return "mesh point";
+       case NL80211_IFTYPE_AP:
+               return "access point";
+       case NL80211_IFTYPE_P2P_CLIENT:
+               return "p2p client";
+       case NL80211_IFTYPE_P2P_GO:
+               return "p2p go";
+       default:
+               return "unsupported";
+       }
+}
+
+static void cw1200_queue_status_show(struct seq_file *seq,
+                                    struct cw1200_queue *q)
+{
+       int i;
+       seq_printf(seq, "Queue       %d:\n", q->queue_id);
+       seq_printf(seq, "  capacity: %zu\n", q->capacity);
+       seq_printf(seq, "  queued:   %zu\n", q->num_queued);
+       seq_printf(seq, "  pending:  %zu\n", q->num_pending);
+       seq_printf(seq, "  sent:     %zu\n", q->num_sent);
+       seq_printf(seq, "  locked:   %s\n", q->tx_locked_cnt ? "yes" : "no");
+       seq_printf(seq, "  overfull: %s\n", q->overfull ? "yes" : "no");
+       seq_puts(seq,   "  link map: 0-> ");
+       for (i = 0; i < q->stats->map_capacity; ++i)
+               seq_printf(seq, "%.2d ", q->link_map_cache[i]);
+       seq_printf(seq, "<-%zu\n", q->stats->map_capacity);
+}
+
+static void cw1200_debug_print_map(struct seq_file *seq,
+                                  struct cw1200_common *priv,
+                                  const char *label,
+                                  u32 map)
+{
+       int i;
+       seq_printf(seq, "%s0-> ", label);
+       for (i = 0; i < priv->tx_queue_stats.map_capacity; ++i)
+               seq_printf(seq, "%s ", (map & BIT(i)) ? "**" : "..");
+       seq_printf(seq, "<-%zu\n", priv->tx_queue_stats.map_capacity - 1);
+}
+
+static int cw1200_status_show(struct seq_file *seq, void *v)
+{
+       int i;
+       struct list_head *item;
+       struct cw1200_common *priv = seq->private;
+       struct cw1200_debug_priv *d = priv->debug;
+
+       seq_puts(seq,   "CW1200 Wireless LAN driver status\n");
+       seq_printf(seq, "Hardware:   %d.%d\n",
+                  priv->wsm_caps.hw_id,
+                  priv->wsm_caps.hw_subid);
+       seq_printf(seq, "Firmware:   %s %d.%d\n",
+                  cw1200_fw_types[priv->wsm_caps.fw_type],
+                  priv->wsm_caps.fw_ver,
+                  priv->wsm_caps.fw_build);
+       seq_printf(seq, "FW API:     %d\n",
+                  priv->wsm_caps.fw_api);
+       seq_printf(seq, "FW caps:    0x%.4X\n",
+                  priv->wsm_caps.fw_cap);
+       seq_printf(seq, "FW label:  '%s'\n",
+                  priv->wsm_caps.fw_label);
+       seq_printf(seq, "Mode:       %s%s\n",
+                  cw1200_debug_mode(priv->mode),
+                  priv->listening ? " (listening)" : "");
+       seq_printf(seq, "Join state: %s\n",
+                  cw1200_debug_join_status[priv->join_status]);
+       if (priv->channel)
+               seq_printf(seq, "Channel:    %d%s\n",
+                          priv->channel->hw_value,
+                          priv->channel_switch_in_progress ?
+                          " (switching)" : "");
+       if (priv->rx_filter.promiscuous)
+               seq_puts(seq,   "Filter:     promisc\n");
+       else if (priv->rx_filter.fcs)
+               seq_puts(seq,   "Filter:     fcs\n");
+       if (priv->rx_filter.bssid)
+               seq_puts(seq,   "Filter:     bssid\n");
+       if (!priv->disable_beacon_filter)
+               seq_puts(seq,   "Filter:     beacons\n");
+
+       if (priv->enable_beacon ||
+           priv->mode == NL80211_IFTYPE_AP ||
+           priv->mode == NL80211_IFTYPE_ADHOC ||
+           priv->mode == NL80211_IFTYPE_MESH_POINT ||
+           priv->mode == NL80211_IFTYPE_P2P_GO)
+               seq_printf(seq, "Beaconing:  %s\n",
+                          priv->enable_beacon ?
+                          "enabled" : "disabled");
+
+       for (i = 0; i < 4; ++i)
+               seq_printf(seq, "EDCA(%d):    %d, %d, %d, %d, %d\n", i,
+                          priv->edca.params[i].cwmin,
+                          priv->edca.params[i].cwmax,
+                          priv->edca.params[i].aifns,
+                          priv->edca.params[i].txop_limit,
+                          priv->edca.params[i].max_rx_lifetime);
+
+       if (priv->join_status == CW1200_JOIN_STATUS_STA) {
+               static const char *pm_mode = "unknown";
+               switch (priv->powersave_mode.mode) {
+               case WSM_PSM_ACTIVE:
+                       pm_mode = "off";
+                       break;
+               case WSM_PSM_PS:
+                       pm_mode = "on";
+                       break;
+               case WSM_PSM_FAST_PS:
+                       pm_mode = "dynamic";
+                       break;
+               }
+               seq_printf(seq, "Preamble:   %s\n",
+                          cw1200_debug_preamble[priv->association_mode.preamble]);
+               seq_printf(seq, "AMPDU spcn: %d\n",
+                          priv->association_mode.mpdu_start_spacing);
+               seq_printf(seq, "Basic rate: 0x%.8X\n",
+                          le32_to_cpu(priv->association_mode.basic_rate_set));
+               seq_printf(seq, "Bss lost:   %d beacons\n",
+                          priv->bss_params.beacon_lost_count);
+               seq_printf(seq, "AID:        %d\n",
+                          priv->bss_params.aid);
+               seq_printf(seq, "Rates:      0x%.8X\n",
+                          priv->bss_params.operational_rate_set);
+               seq_printf(seq, "Powersave:  %s\n", pm_mode);
+       }
+       seq_printf(seq, "HT:         %s\n",
+                  cw1200_is_ht(&priv->ht_info) ? "on" : "off");
+       if (cw1200_is_ht(&priv->ht_info)) {
+               seq_printf(seq, "Greenfield: %s\n",
+                          cw1200_ht_greenfield(&priv->ht_info) ? "yes" : "no");
+               seq_printf(seq, "AMPDU dens: %d\n",
+                          cw1200_ht_ampdu_density(&priv->ht_info));
+       }
+       seq_printf(seq, "RSSI thold: %d\n",
+                  priv->cqm_rssi_thold);
+       seq_printf(seq, "RSSI hyst:  %d\n",
+                  priv->cqm_rssi_hyst);
+       seq_printf(seq, "Long retr:  %d\n",
+                  priv->long_frame_max_tx_count);
+       seq_printf(seq, "Short retr: %d\n",
+                  priv->short_frame_max_tx_count);
+       spin_lock_bh(&priv->tx_policy_cache.lock);
+       i = 0;
+       list_for_each(item, &priv->tx_policy_cache.used)
+               ++i;
+       spin_unlock_bh(&priv->tx_policy_cache.lock);
+       seq_printf(seq, "RC in use:  %d\n", i);
+
+       seq_puts(seq, "\n");
+       for (i = 0; i < 4; ++i) {
+               cw1200_queue_status_show(seq, &priv->tx_queue[i]);
+               seq_puts(seq, "\n");
+       }
+
+       cw1200_debug_print_map(seq, priv, "Link map:   ",
+                              priv->link_id_map);
+       cw1200_debug_print_map(seq, priv, "Asleep map: ",
+                              priv->sta_asleep_mask);
+       cw1200_debug_print_map(seq, priv, "PSPOLL map: ",
+                              priv->pspoll_mask);
+
+       seq_puts(seq, "\n");
+
+       for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) {
+               if (priv->link_id_db[i].status) {
+                       seq_printf(seq, "Link %d:     %s, %pM\n",
+                                  i + 1,
+                                  cw1200_debug_link_id[priv->link_id_db[i].status],
+                                  priv->link_id_db[i].mac);
+               }
+       }
+
+       seq_puts(seq, "\n");
+
+       seq_printf(seq, "BH status:  %s\n",
+                  atomic_read(&priv->bh_term) ? "terminated" : "alive");
+       seq_printf(seq, "Pending RX: %d\n",
+                  atomic_read(&priv->bh_rx));
+       seq_printf(seq, "Pending TX: %d\n",
+                  atomic_read(&priv->bh_tx));
+       if (priv->bh_error)
+               seq_printf(seq, "BH errcode: %d\n",
+                          priv->bh_error);
+       seq_printf(seq, "TX bufs:    %d x %d bytes\n",
+                  priv->wsm_caps.input_buffers,
+                  priv->wsm_caps.input_buffer_size);
+       seq_printf(seq, "Used bufs:  %d\n",
+                  priv->hw_bufs_used);
+       seq_printf(seq, "Powermgmt:  %s\n",
+                  priv->powersave_enabled ? "on" : "off");
+       seq_printf(seq, "Device:     %s\n",
+                  priv->device_can_sleep ? "asleep" : "awake");
+
+       spin_lock(&priv->wsm_cmd.lock);
+       seq_printf(seq, "WSM status: %s\n",
+                  priv->wsm_cmd.done ? "idle" : "active");
+       seq_printf(seq, "WSM cmd:    0x%.4X (%td bytes)\n",
+                  priv->wsm_cmd.cmd, priv->wsm_cmd.len);
+       seq_printf(seq, "WSM retval: %d\n",
+                  priv->wsm_cmd.ret);
+       spin_unlock(&priv->wsm_cmd.lock);
+
+       seq_printf(seq, "Datapath:   %s\n",
+                  atomic_read(&priv->tx_lock) ? "locked" : "unlocked");
+       if (atomic_read(&priv->tx_lock))
+               seq_printf(seq, "TXlock cnt: %d\n",
+                          atomic_read(&priv->tx_lock));
+
+       seq_printf(seq, "TXed:       %d\n",
+                  d->tx);
+       seq_printf(seq, "AGG TXed:   %d\n",
+                  d->tx_agg);
+       seq_printf(seq, "MULTI TXed: %d (%d)\n",
+                  d->tx_multi, d->tx_multi_frames);
+       seq_printf(seq, "RXed:       %d\n",
+                  d->rx);
+       seq_printf(seq, "AGG RXed:   %d\n",
+                  d->rx_agg);
+       seq_printf(seq, "TX miss:    %d\n",
+                  d->tx_cache_miss);
+       seq_printf(seq, "TX align:   %d\n",
+                  d->tx_align);
+       seq_printf(seq, "TX burst:   %d\n",
+                  d->tx_burst);
+       seq_printf(seq, "TX TTL:     %d\n",
+                  d->tx_ttl);
+       seq_printf(seq, "Scan:       %s\n",
+                  atomic_read(&priv->scan.in_progress) ? "active" : "idle");
+
+       return 0;
+}
+
+static int cw1200_status_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, &cw1200_status_show,
+               inode->i_private);
+}
+
+static const struct file_operations fops_status = {
+       .open = cw1200_status_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+       .owner = THIS_MODULE,
+};
+
+static int cw1200_counters_show(struct seq_file *seq, void *v)
+{
+       int ret;
+       struct cw1200_common *priv = seq->private;
+       struct wsm_mib_counters_table counters;
+
+       ret = wsm_get_counters_table(priv, &counters);
+       if (ret)
+               return ret;
+
+#define PUT_COUNTER(tab, name) \
+       seq_printf(seq, "%s:" tab "%d\n", #name, \
+               __le32_to_cpu(counters.name))
+
+       PUT_COUNTER("\t\t", plcp_errors);
+       PUT_COUNTER("\t\t", fcs_errors);
+       PUT_COUNTER("\t\t", tx_packets);
+       PUT_COUNTER("\t\t", rx_packets);
+       PUT_COUNTER("\t\t", rx_packet_errors);
+       PUT_COUNTER("\t",   rx_decryption_failures);
+       PUT_COUNTER("\t\t", rx_mic_failures);
+       PUT_COUNTER("\t",   rx_no_key_failures);
+       PUT_COUNTER("\t",   tx_multicast_frames);
+       PUT_COUNTER("\t",   tx_frames_success);
+       PUT_COUNTER("\t",   tx_frame_failures);
+       PUT_COUNTER("\t",   tx_frames_retried);
+       PUT_COUNTER("\t",   tx_frames_multi_retried);
+       PUT_COUNTER("\t",   rx_frame_duplicates);
+       PUT_COUNTER("\t\t", rts_success);
+       PUT_COUNTER("\t\t", rts_failures);
+       PUT_COUNTER("\t\t", ack_failures);
+       PUT_COUNTER("\t",   rx_multicast_frames);
+       PUT_COUNTER("\t",   rx_frames_success);
+       PUT_COUNTER("\t",   rx_cmac_icv_errors);
+       PUT_COUNTER("\t\t", rx_cmac_replays);
+       PUT_COUNTER("\t",   rx_mgmt_ccmp_replays);
+
+#undef PUT_COUNTER
+
+       return 0;
+}
+
+static int cw1200_counters_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, &cw1200_counters_show,
+               inode->i_private);
+}
+
+static const struct file_operations fops_counters = {
+       .open = cw1200_counters_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+       .owner = THIS_MODULE,
+};
+
+static ssize_t cw1200_wsm_dumps(struct file *file,
+       const char __user *user_buf, size_t count, loff_t *ppos)
+{
+       struct cw1200_common *priv = file->private_data;
+       char buf[1];
+
+       if (!count)
+               return -EINVAL;
+       if (copy_from_user(buf, user_buf, 1))
+               return -EFAULT;
+
+       if (buf[0] == '1')
+               priv->wsm_enable_wsm_dumps = 1;
+       else
+               priv->wsm_enable_wsm_dumps = 0;
+
+       return count;
+}
+
+static const struct file_operations fops_wsm_dumps = {
+       .open = simple_open,
+       .write = cw1200_wsm_dumps,
+       .llseek = default_llseek,
+};
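+
+/*
+ * Usage sketch for the "wsm_dumps" node, assuming the usual mac80211
+ * debugfs layout:
+ *   echo 1 > /sys/kernel/debug/ieee80211/phy0/cw1200/wsm_dumps
+ *   echo 0 > /sys/kernel/debug/ieee80211/phy0/cw1200/wsm_dumps
+ */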
+
+int cw1200_debug_init(struct cw1200_common *priv)
+{
+       int ret = -ENOMEM;
+       struct cw1200_debug_priv *d = kzalloc(sizeof(struct cw1200_debug_priv),
+                       GFP_KERNEL);
+       priv->debug = d;
+       if (!d)
+               return ret;
+
+       d->debugfs_phy = debugfs_create_dir("cw1200",
+                                           priv->hw->wiphy->debugfsdir);
+       if (!d->debugfs_phy)
+               goto err;
+
+       if (!debugfs_create_file("status", S_IRUSR, d->debugfs_phy,
+                                priv, &fops_status))
+               goto err;
+
+       if (!debugfs_create_file("counters", S_IRUSR, d->debugfs_phy,
+                                priv, &fops_counters))
+               goto err;
+
+       if (!debugfs_create_file("wsm_dumps", S_IWUSR, d->debugfs_phy,
+                                priv, &fops_wsm_dumps))
+               goto err;
+
+       return 0;
+
+err:
+       priv->debug = NULL;
+       debugfs_remove_recursive(d->debugfs_phy);
+       kfree(d);
+       return ret;
+}
+
+void cw1200_debug_release(struct cw1200_common *priv)
+{
+       struct cw1200_debug_priv *d = priv->debug;
+       if (d) {
+               debugfs_remove_recursive(d->debugfs_phy);
+               priv->debug = NULL;
+               kfree(d);
+       }
+}
diff --git a/drivers/net/wireless/cw1200/debug.h b/drivers/net/wireless/cw1200/debug.h
new file mode 100644 (file)
index 0000000..b525aba
--- /dev/null
@@ -0,0 +1,93 @@
+/*
+ * DebugFS code for ST-Ericsson CW1200 mac80211 driver
+ *
+ * Copyright (c) 2011, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef CW1200_DEBUG_H_INCLUDED
+#define CW1200_DEBUG_H_INCLUDED
+
+struct cw1200_debug_priv {
+       struct dentry *debugfs_phy;
+       int tx;
+       int tx_agg;
+       int rx;
+       int rx_agg;
+       int tx_multi;
+       int tx_multi_frames;
+       int tx_cache_miss;
+       int tx_align;
+       int tx_ttl;
+       int tx_burst;
+       int ba_cnt;
+       int ba_acc;
+       int ba_cnt_rx;
+       int ba_acc_rx;
+};
+
+int cw1200_debug_init(struct cw1200_common *priv);
+void cw1200_debug_release(struct cw1200_common *priv);
+
+static inline void cw1200_debug_txed(struct cw1200_common *priv)
+{
+       ++priv->debug->tx;
+}
+
+static inline void cw1200_debug_txed_agg(struct cw1200_common *priv)
+{
+       ++priv->debug->tx_agg;
+}
+
+static inline void cw1200_debug_txed_multi(struct cw1200_common *priv,
+                                          int count)
+{
+       ++priv->debug->tx_multi;
+       priv->debug->tx_multi_frames += count;
+}
+
+static inline void cw1200_debug_rxed(struct cw1200_common *priv)
+{
+       ++priv->debug->rx;
+}
+
+static inline void cw1200_debug_rxed_agg(struct cw1200_common *priv)
+{
+       ++priv->debug->rx_agg;
+}
+
+static inline void cw1200_debug_tx_cache_miss(struct cw1200_common *priv)
+{
+       ++priv->debug->tx_cache_miss;
+}
+
+static inline void cw1200_debug_tx_align(struct cw1200_common *priv)
+{
+       ++priv->debug->tx_align;
+}
+
+static inline void cw1200_debug_tx_ttl(struct cw1200_common *priv)
+{
+       ++priv->debug->tx_ttl;
+}
+
+static inline void cw1200_debug_tx_burst(struct cw1200_common *priv)
+{
+       ++priv->debug->tx_burst;
+}
+
+static inline void cw1200_debug_ba(struct cw1200_common *priv,
+                                  int ba_cnt, int ba_acc,
+                                  int ba_cnt_rx, int ba_acc_rx)
+{
+       priv->debug->ba_cnt = ba_cnt;
+       priv->debug->ba_acc = ba_acc;
+       priv->debug->ba_cnt_rx = ba_cnt_rx;
+       priv->debug->ba_acc_rx = ba_acc_rx;
+}
+
+#endif /* CW1200_DEBUG_H_INCLUDED */
diff --git a/drivers/net/wireless/cw1200/fwio.c b/drivers/net/wireless/cw1200/fwio.c
new file mode 100644 (file)
index 0000000..acdff0f
--- /dev/null
@@ -0,0 +1,520 @@
+/*
+ * Firmware I/O code for mac80211 ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * Based on:
+ * ST-Ericsson UMAC CW1200 driver which is
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Ajitpal Singh <ajitpal.singh@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/vmalloc.h>
+#include <linux/sched.h>
+#include <linux/firmware.h>
+
+#include "cw1200.h"
+#include "fwio.h"
+#include "hwio.h"
+#include "hwbus.h"
+#include "bh.h"
+
+static int cw1200_get_hw_type(u32 config_reg_val, int *major_revision)
+{
+       int hw_type = -1;
+       u32 silicon_type = (config_reg_val >> 24) & 0x7;
+       u32 silicon_vers = (config_reg_val >> 31) & 0x1;
+
+       switch (silicon_type) {
+       case 0x00:
+               *major_revision = 1;
+               hw_type = HIF_9000_SILICON_VERSATILE;
+               break;
+       case 0x01:
+       case 0x02: /* CW1x00 */
+       case 0x04: /* CW1x60 */
+               *major_revision = silicon_type;
+               if (silicon_vers)
+                       hw_type = HIF_8601_VERSATILE;
+               else
+                       hw_type = HIF_8601_SILICON;
+               break;
+       default:
+               break;
+       }
+
+       return hw_type;
+}
+
+static int cw1200_load_firmware_cw1200(struct cw1200_common *priv)
+{
+       int ret, block, num_blocks;
+       unsigned i;
+       u32 val32;
+       u32 put = 0, get = 0;
+       u8 *buf = NULL;
+       const char *fw_path;
+       const struct firmware *firmware = NULL;
+
+       /* Macros are local. */
+#define APB_WRITE(reg, val) \
+       do { \
+               ret = cw1200_apb_write_32(priv, CW1200_APB(reg), (val)); \
+               if (ret < 0) \
+                       goto error; \
+       } while (0)
+#define APB_READ(reg, val) \
+       do { \
+               ret = cw1200_apb_read_32(priv, CW1200_APB(reg), &(val)); \
+               if (ret < 0) \
+                       goto error; \
+       } while (0)
+#define REG_WRITE(reg, val) \
+       do { \
+               ret = cw1200_reg_write_32(priv, (reg), (val)); \
+               if (ret < 0) \
+                       goto error; \
+       } while (0)
+#define REG_READ(reg, val) \
+       do { \
+               ret = cw1200_reg_read_32(priv, (reg), &(val)); \
+               if (ret < 0) \
+                       goto error; \
+       } while (0)
+
+       switch (priv->hw_revision) {
+       case CW1200_HW_REV_CUT10:
+               fw_path = FIRMWARE_CUT10;
+               if (!priv->sdd_path)
+                       priv->sdd_path = SDD_FILE_10;
+               break;
+       case CW1200_HW_REV_CUT11:
+               fw_path = FIRMWARE_CUT11;
+               if (!priv->sdd_path)
+                       priv->sdd_path = SDD_FILE_11;
+               break;
+       case CW1200_HW_REV_CUT20:
+               fw_path = FIRMWARE_CUT20;
+               if (!priv->sdd_path)
+                       priv->sdd_path = SDD_FILE_20;
+               break;
+       case CW1200_HW_REV_CUT22:
+               fw_path = FIRMWARE_CUT22;
+               if (!priv->sdd_path)
+                       priv->sdd_path = SDD_FILE_22;
+               break;
+       case CW1X60_HW_REV:
+               fw_path = FIRMWARE_CW1X60;
+               if (!priv->sdd_path)
+                       priv->sdd_path = SDD_FILE_CW1X60;
+               break;
+       default:
+               pr_err("Invalid silicon revision %d.\n", priv->hw_revision);
+               return -EINVAL;
+       }
+
+       /* Initialize common registers */
+       APB_WRITE(DOWNLOAD_IMAGE_SIZE_REG, DOWNLOAD_ARE_YOU_HERE);
+       APB_WRITE(DOWNLOAD_PUT_REG, 0);
+       APB_WRITE(DOWNLOAD_GET_REG, 0);
+       APB_WRITE(DOWNLOAD_STATUS_REG, DOWNLOAD_PENDING);
+       APB_WRITE(DOWNLOAD_FLAGS_REG, 0);
+
+       /* Write the NOP Instruction */
+       REG_WRITE(ST90TDS_SRAM_BASE_ADDR_REG_ID, 0xFFF20000);
+       REG_WRITE(ST90TDS_AHB_DPORT_REG_ID, 0xEAFFFFFE);
+
+       /* Release CPU from RESET */
+       REG_READ(ST90TDS_CONFIG_REG_ID, val32);
+       val32 &= ~ST90TDS_CONFIG_CPU_RESET_BIT;
+       REG_WRITE(ST90TDS_CONFIG_REG_ID, val32);
+
+       /* Enable Clock */
+       val32 &= ~ST90TDS_CONFIG_CPU_CLK_DIS_BIT;
+       REG_WRITE(ST90TDS_CONFIG_REG_ID, val32);
+
+       /* Load a firmware file */
+       ret = request_firmware(&firmware, fw_path, priv->pdev);
+       if (ret) {
+               pr_err("Can't load firmware file %s.\n", fw_path);
+               goto error;
+       }
+
+       buf = kmalloc(DOWNLOAD_BLOCK_SIZE, GFP_KERNEL | GFP_DMA);
+       if (!buf) {
+               pr_err("Can't allocate firmware load buffer.\n");
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       /* Check if the bootloader is ready */
+       for (i = 0; i < 100; i += 1 + i / 2) {
+               APB_READ(DOWNLOAD_IMAGE_SIZE_REG, val32);
+               if (val32 == DOWNLOAD_I_AM_HERE)
+                       break;
+               mdelay(i);
+       } /* End of for loop */
+
+       if (val32 != DOWNLOAD_I_AM_HERE) {
+               pr_err("Bootloader is not ready.\n");
+               ret = -ETIMEDOUT;
+               goto error;
+       }
+
+       /* Calculate number of download blocks */
+       num_blocks = (firmware->size - 1) / DOWNLOAD_BLOCK_SIZE + 1;
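+       /* e.g. a 300000-byte image in 1024-byte blocks gives 293 blocks */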
+
+       /* Updating the length in Download Ctrl Area */
+       val32 = firmware->size; /* narrowing conversion from size_t to u32 */
+       APB_WRITE(DOWNLOAD_IMAGE_SIZE_REG, val32);
+
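+       /*
+        * Flow control: the host advances 'put' as it writes blocks into
+        * the shared FIFO and the bootloader advances 'get' as it consumes
+        * them; a new block is written only once at least one block of
+        * FIFO space is free.
+        */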
+       /* Firmware downloading loop */
+       for (block = 0; block < num_blocks; block++) {
+               size_t tx_size;
+               size_t block_size;
+
+               /* check the download status */
+               APB_READ(DOWNLOAD_STATUS_REG, val32);
+               if (val32 != DOWNLOAD_PENDING) {
+                       pr_err("Bootloader reported error %d.\n", val32);
+                       ret = -EIO;
+                       goto error;
+               }
+
+               /* wait until the FIFO has room for at least one more block */
+               for (i = 0; i < 100; i++) {
+                       APB_READ(DOWNLOAD_GET_REG, get);
+                       if ((put - get) <=
+                           (DOWNLOAD_FIFO_SIZE - DOWNLOAD_BLOCK_SIZE))
+                               break;
+                       mdelay(i);
+               }
+
+               if ((put - get) > (DOWNLOAD_FIFO_SIZE - DOWNLOAD_BLOCK_SIZE)) {
+                       pr_err("Timeout waiting for FIFO.\n");
+                       ret = -ETIMEDOUT;
+                       goto error;
+               }
+
+               /* calculate the block size */
+               tx_size = block_size = min((size_t)(firmware->size - put),
+                       (size_t)DOWNLOAD_BLOCK_SIZE);
+
+               memcpy(buf, &firmware->data[put], block_size);
+               if (block_size < DOWNLOAD_BLOCK_SIZE) {
+                       memset(&buf[block_size], 0,
+                              DOWNLOAD_BLOCK_SIZE - block_size);
+                       tx_size = DOWNLOAD_BLOCK_SIZE;
+               }
+
+               /* send the block to sram */
+               ret = cw1200_apb_write(priv,
+                       CW1200_APB(DOWNLOAD_FIFO_OFFSET +
+                                  (put & (DOWNLOAD_FIFO_SIZE - 1))),
+                       buf, tx_size);
+               if (ret < 0) {
+                       pr_err("Can't write firmware block @ %d!\n",
+                              put & (DOWNLOAD_FIFO_SIZE - 1));
+                       goto error;
+               }
+
+               /* update the put register */
+               put += block_size;
+               APB_WRITE(DOWNLOAD_PUT_REG, put);
+       } /* End of firmware download loop */
+
+       /* Wait for the download completion */
+       for (i = 0; i < 300; i += 1 + i / 2) {
+               APB_READ(DOWNLOAD_STATUS_REG, val32);
+               if (val32 != DOWNLOAD_PENDING)
+                       break;
+               mdelay(i);
+       }
+       if (val32 != DOWNLOAD_SUCCESS) {
+               pr_err("Wait for download completion failed: 0x%.8X\n", val32);
+               ret = -ETIMEDOUT;
+               goto error;
+       } else {
+               pr_info("Firmware download completed.\n");
+               ret = 0;
+       }
+
+error:
+       kfree(buf);
+       if (firmware)
+               release_firmware(firmware);
+       return ret;
+
+#undef APB_WRITE
+#undef APB_READ
+#undef REG_WRITE
+#undef REG_READ
+}
+
+
+static int config_reg_read(struct cw1200_common *priv, u32 *val)
+{
+       switch (priv->hw_type) {
+       case HIF_9000_SILICON_VERSATILE: {
+               u16 val16;
+               int ret = cw1200_reg_read_16(priv,
+                                            ST90TDS_CONFIG_REG_ID,
+                                            &val16);
+               if (ret < 0)
+                       return ret;
+               *val = val16;
+               return 0;
+       }
+       case HIF_8601_VERSATILE:
+       case HIF_8601_SILICON:
+       default:
+               cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, val);
+               break;
+       }
+       return 0;
+}
+
+static int config_reg_write(struct cw1200_common *priv, u32 val)
+{
+       switch (priv->hw_type) {
+       case HIF_9000_SILICON_VERSATILE:
+               return cw1200_reg_write_16(priv,
+                                          ST90TDS_CONFIG_REG_ID,
+                                          (u16)val);
+       case HIF_8601_VERSATILE:
+       case HIF_8601_SILICON:
+       default:
+               return cw1200_reg_write_32(priv, ST90TDS_CONFIG_REG_ID, val);
+       }
+       return 0;
+}
+
+int cw1200_load_firmware(struct cw1200_common *priv)
+{
+       int ret;
+       int i;
+       u32 val32;
+       u16 val16;
+       int major_revision = -1;
+
+       /* Read CONFIG Register */
+       ret = cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32);
+       if (ret < 0) {
+               pr_err("Can't read config register.\n");
+               goto out;
+       }
+
+       if (val32 == 0 || val32 == 0xffffffff) {
+               pr_err("Bad config register value (0x%08x)\n", val32);
+               ret = -EIO;
+               goto out;
+       }
+
+       priv->hw_type = cw1200_get_hw_type(val32, &major_revision);
+       if (priv->hw_type < 0) {
+               pr_err("Can't deduce hardware type.\n");
+               ret = -ENOTSUPP;
+               goto out;
+       }
+
+       /* Set DPLL Reg value, and read back to confirm writes work */
+       ret = cw1200_reg_write_32(priv, ST90TDS_TSET_GEN_R_W_REG_ID,
+                                 cw1200_dpll_from_clk(priv->hw_refclk));
+       if (ret < 0) {
+               pr_err("Can't write DPLL register.\n");
+               goto out;
+       }
+
+       msleep(20);
+
+       ret = cw1200_reg_read_32(priv,
+               ST90TDS_TSET_GEN_R_W_REG_ID, &val32);
+       if (ret < 0) {
+               pr_err("Can't read DPLL register.\n");
+               goto out;
+       }
+
+       if (val32 != cw1200_dpll_from_clk(priv->hw_refclk)) {
+               pr_err("Unable to initialise DPLL register. Wrote 0x%.8X, Read 0x%.8X.\n",
+                      cw1200_dpll_from_clk(priv->hw_refclk), val32);
+               ret = -EIO;
+               goto out;
+       }
+
+       /* Set wakeup bit in device */
+       ret = cw1200_reg_read_16(priv, ST90TDS_CONTROL_REG_ID, &val16);
+       if (ret < 0) {
+               pr_err("set_wakeup: can't read control register.\n");
+               goto out;
+       }
+
+       ret = cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID,
+               val16 | ST90TDS_CONT_WUP_BIT);
+       if (ret < 0) {
+               pr_err("set_wakeup: can't write control register.\n");
+               goto out;
+       }
+
+       /* Wait for wakeup */
+       for (i = 0; i < 300; i += (1 + i / 2)) {
+               ret = cw1200_reg_read_16(priv,
+                       ST90TDS_CONTROL_REG_ID, &val16);
+               if (ret < 0) {
+                       pr_err("wait_for_wakeup: can't read control register.\n");
+                       goto out;
+               }
+
+               if (val16 & ST90TDS_CONT_RDY_BIT)
+                       break;
+
+               msleep(i);
+       }
+
+       if ((val16 & ST90TDS_CONT_RDY_BIT) == 0) {
+               pr_err("wait_for_wakeup: device is not responding.\n");
+               ret = -ETIMEDOUT;
+               goto out;
+       }
+
+       switch (major_revision) {
+       case 1:
+               /* CW1200 Hardware detection logic : Check for CUT1.1 */
+               ret = cw1200_ahb_read_32(priv, CW1200_CUT_ID_ADDR, &val32);
+               if (ret) {
+                       pr_err("HW detection: can't read CUT ID.\n");
+                       goto out;
+               }
+
+               switch (val32) {
+               case CW1200_CUT_11_ID_STR:
+                       pr_info("CW1x00 Cut 1.1 silicon detected.\n");
+                       priv->hw_revision = CW1200_HW_REV_CUT11;
+                       break;
+               default:
+                       pr_info("CW1x00 Cut 1.0 silicon detected.\n");
+                       priv->hw_revision = CW1200_HW_REV_CUT10;
+                       break;
+               }
+
+               /* According to ST-E, CUT<2.0 has busted BA TID0-3.
+                  Just disable it entirely...
+               */
+               priv->ba_rx_tid_mask = 0;
+               priv->ba_tx_tid_mask = 0;
+               break;
+       case 2: {
+               u32 ar1, ar2, ar3;
+               ret = cw1200_ahb_read_32(priv, CW1200_CUT2_ID_ADDR, &ar1);
+               if (ret) {
+                       pr_err("(1) HW detection: can't read CUT ID\n");
+                       goto out;
+               }
+               ret = cw1200_ahb_read_32(priv, CW1200_CUT2_ID_ADDR + 4, &ar2);
+               if (ret) {
+                       pr_err("(2) HW detection: can't read CUT ID.\n");
+                       goto out;
+               }
+
+               ret = cw1200_ahb_read_32(priv, CW1200_CUT2_ID_ADDR + 8, &ar3);
+               if (ret) {
+                       pr_err("(3) HW detection: can't read CUT ID.\n");
+                       goto out;
+               }
+
+               if (ar1 == CW1200_CUT_22_ID_STR1 &&
+                   ar2 == CW1200_CUT_22_ID_STR2 &&
+                   ar3 == CW1200_CUT_22_ID_STR3) {
+                       pr_info("CW1x00 Cut 2.2 silicon detected.\n");
+                       priv->hw_revision = CW1200_HW_REV_CUT22;
+               } else {
+                       pr_info("CW1x00 Cut 2.0 silicon detected.\n");
+                       priv->hw_revision = CW1200_HW_REV_CUT20;
+               }
+               break;
+       }
+       case 4:
+               pr_info("CW1x60 silicon detected.\n");
+               priv->hw_revision = CW1X60_HW_REV;
+               break;
+       default:
+               pr_err("Unsupported silicon major revision %d.\n",
+                      major_revision);
+               ret = -ENOTSUPP;
+               goto out;
+       }
+
+       /* Checking for access mode */
+       ret = config_reg_read(priv, &val32);
+       if (ret < 0) {
+               pr_err("Can't read config register.\n");
+               goto out;
+       }
+
+       if (!(val32 & ST90TDS_CONFIG_ACCESS_MODE_BIT)) {
+               pr_err("Device is already in QUEUE mode!\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       switch (priv->hw_type)  {
+       case HIF_8601_SILICON:
+               if (priv->hw_revision == CW1X60_HW_REV) {
+                       pr_err("Can't handle CW1160/1260 firmware load yet.\n");
+                       ret = -ENOTSUPP;
+                       goto out;
+               }
+               ret = cw1200_load_firmware_cw1200(priv);
+               break;
+       default:
+               pr_err("Can't perform firmware load for hw type %d.\n",
+                      priv->hw_type);
+               ret = -ENOTSUPP;
+               goto out;
+       }
+       if (ret < 0) {
+               pr_err("Firmware load error.\n");
+               goto out;
+       }
+
+       /* Enable interrupt signalling */
+       priv->hwbus_ops->lock(priv->hwbus_priv);
+       ret = __cw1200_irq_enable(priv, 1);
+       priv->hwbus_ops->unlock(priv->hwbus_priv);
+       if (ret < 0)
+               goto unsubscribe;
+
+       /* Configure device for MESSAGE MODE */
+       ret = config_reg_read(priv, &val32);
+       if (ret < 0) {
+               pr_err("Can't read config register.\n");
+               goto unsubscribe;
+       }
+       ret = config_reg_write(priv, val32 & ~ST90TDS_CONFIG_ACCESS_MODE_BIT);
+       if (ret < 0) {
+               pr_err("Can't write config register.\n");
+               goto unsubscribe;
+       }
+
+       /* Unless we read the CONFIG Register we are
+        * not able to get an interrupt
+        */
+       mdelay(10);
+       config_reg_read(priv, &val32);
+
+out:
+       return ret;
+
+unsubscribe:
+       /* Disable interrupt signalling */
+       priv->hwbus_ops->lock(priv->hwbus_priv);
+       ret = __cw1200_irq_enable(priv, 0);
+       priv->hwbus_ops->unlock(priv->hwbus_priv);
+       return ret;
+}
diff --git a/drivers/net/wireless/cw1200/fwio.h b/drivers/net/wireless/cw1200/fwio.h
new file mode 100644 (file)
index 0000000..ea30993
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * Firmware API for mac80211 ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * Based on:
+ * ST-Ericsson UMAC CW1200 driver which is
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Ajitpal Singh <ajitpal.singh@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef FWIO_H_INCLUDED
+#define FWIO_H_INCLUDED
+
+#define BOOTLOADER_CW1X60       "boot_cw1x60.bin"
+#define FIRMWARE_CW1X60                "wsm_cw1x60.bin"
+#define FIRMWARE_CUT22         "wsm_22.bin"
+#define FIRMWARE_CUT20         "wsm_20.bin"
+#define FIRMWARE_CUT11         "wsm_11.bin"
+#define FIRMWARE_CUT10         "wsm_10.bin"
+#define SDD_FILE_CW1X60                "sdd_cw1x60.bin"
+#define SDD_FILE_22            "sdd_22.bin"
+#define SDD_FILE_20            "sdd_20.bin"
+#define SDD_FILE_11            "sdd_11.bin"
+#define SDD_FILE_10            "sdd_10.bin"
+
+int cw1200_load_firmware(struct cw1200_common *priv);
+
+/* SDD definitions */
+#define SDD_PTA_CFG_ELT_ID 0xEB
+#define SDD_REFERENCE_FREQUENCY_ELT_ID 0xc5
+u32 cw1200_dpll_from_clk(u16 clk);
+
+#endif
diff --git a/drivers/net/wireless/cw1200/hwbus.h b/drivers/net/wireless/cw1200/hwbus.h
new file mode 100644 (file)
index 0000000..8b2fc83
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * Common hwbus abstraction layer interface for cw1200 wireless driver
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef CW1200_HWBUS_H
+#define CW1200_HWBUS_H
+
+struct hwbus_priv;
+
+void cw1200_irq_handler(struct cw1200_common *priv);
+
+/* This MUST be wrapped with hwbus_ops->lock/unlock! */
+int __cw1200_irq_enable(struct cw1200_common *priv, int enable);
+
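+/*
+ * Callbacks supplied by the bus glue (e.g. SDIO or SPI); all register
+ * and data access in hwio.c goes through these hooks, bracketed by
+ * lock()/unlock().
+ */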
+struct hwbus_ops {
+       int (*hwbus_memcpy_fromio)(struct hwbus_priv *self, unsigned int addr,
+                                       void *dst, int count);
+       int (*hwbus_memcpy_toio)(struct hwbus_priv *self, unsigned int addr,
+                                       const void *src, int count);
+       void (*lock)(struct hwbus_priv *self);
+       void (*unlock)(struct hwbus_priv *self);
+       size_t (*align_size)(struct hwbus_priv *self, size_t size);
+       int (*power_mgmt)(struct hwbus_priv *self, bool suspend);
+};
+
+#endif /* CW1200_HWBUS_H */
diff --git a/drivers/net/wireless/cw1200/hwio.c b/drivers/net/wireless/cw1200/hwio.c
new file mode 100644 (file)
index 0000000..ff230b7
--- /dev/null
@@ -0,0 +1,312 @@
+/*
+ * Low-level device IO routines for ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * Based on:
+ * ST-Ericsson UMAC CW1200 driver, which is
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Ajitpal Singh <ajitpal.singh@lockless.no>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+
+#include "cw1200.h"
+#include "hwio.h"
+#include "hwbus.h"
+
+/* SDIO addr is 4 * SPI addr */
+#define SPI_REG_ADDR_TO_SDIO(spi_reg_addr) ((spi_reg_addr) << 2)
+#define SDIO_ADDR17BIT(buf_id, mpf, rfu, reg_id_ofs) \
+                               ((((buf_id)    & 0x1F) << 7) \
+                               | (((mpf)        & 1) << 6) \
+                               | (((rfu)        & 1) << 5) \
+                               | (((reg_id_ofs) & 0x1F) << 0))
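+/*
+ * Field layout produced by SDIO_ADDR17BIT above:
+ *   bits 11..7  buffer id
+ *   bit  6      MPF flag
+ *   bit  5      reserved (RFU)
+ *   bits 4..0   register id offset
+ */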
+#define MAX_RETRY              3
+
+
+static int __cw1200_reg_read(struct cw1200_common *priv, u16 addr,
+                            void *buf, size_t buf_len, int buf_id)
+{
+       u16 addr_sdio;
+       u32 sdio_reg_addr_17bit;
+
+       /* Check if buffer is aligned to 4 byte boundary */
+       if (WARN_ON(((unsigned long)buf & 3) && (buf_len > 4))) {
+               pr_err("buffer is not aligned.\n");
+               return -EINVAL;
+       }
+
+       /* Convert to SDIO Register Address */
+       addr_sdio = SPI_REG_ADDR_TO_SDIO(addr);
+       sdio_reg_addr_17bit = SDIO_ADDR17BIT(buf_id, 0, 0, addr_sdio);
+
+       return priv->hwbus_ops->hwbus_memcpy_fromio(priv->hwbus_priv,
+                                                 sdio_reg_addr_17bit,
+                                                 buf, buf_len);
+}
+
+static int __cw1200_reg_write(struct cw1200_common *priv, u16 addr,
+                               const void *buf, size_t buf_len, int buf_id)
+{
+       u16 addr_sdio;
+       u32 sdio_reg_addr_17bit;
+
+       /* Convert to SDIO Register Address */
+       addr_sdio = SPI_REG_ADDR_TO_SDIO(addr);
+       sdio_reg_addr_17bit = SDIO_ADDR17BIT(buf_id, 0, 0, addr_sdio);
+
+       return priv->hwbus_ops->hwbus_memcpy_toio(priv->hwbus_priv,
+                                               sdio_reg_addr_17bit,
+                                               buf, buf_len);
+}
+
+static inline int __cw1200_reg_read_32(struct cw1200_common *priv,
+                                       u16 addr, u32 *val)
+{
+       __le32 tmp;
+       int i = __cw1200_reg_read(priv, addr, &tmp, sizeof(tmp), 0);
+       *val = le32_to_cpu(tmp);
+       return i;
+}
+
+static inline int __cw1200_reg_write_32(struct cw1200_common *priv,
+                                       u16 addr, u32 val)
+{
+       __le32 tmp = cpu_to_le32(val);
+       return __cw1200_reg_write(priv, addr, &tmp, sizeof(tmp), 0);
+}
+
+static inline int __cw1200_reg_read_16(struct cw1200_common *priv,
+                                       u16 addr, u16 *val)
+{
+       __le16 tmp;
+       int i = __cw1200_reg_read(priv, addr, &tmp, sizeof(tmp), 0);
+       *val = le16_to_cpu(tmp);
+       return i;
+}
+
+static inline int __cw1200_reg_write_16(struct cw1200_common *priv,
+                                       u16 addr, u16 val)
+{
+       __le16 tmp = cpu_to_le16(val);
+       return __cw1200_reg_write(priv, addr, &tmp, sizeof(tmp), 0);
+}
+
+int cw1200_reg_read(struct cw1200_common *priv, u16 addr, void *buf,
+                       size_t buf_len)
+{
+       int ret;
+       priv->hwbus_ops->lock(priv->hwbus_priv);
+       ret = __cw1200_reg_read(priv, addr, buf, buf_len, 0);
+       priv->hwbus_ops->unlock(priv->hwbus_priv);
+       return ret;
+}
+
+int cw1200_reg_write(struct cw1200_common *priv, u16 addr, const void *buf,
+                       size_t buf_len)
+{
+       int ret;
+       priv->hwbus_ops->lock(priv->hwbus_priv);
+       ret = __cw1200_reg_write(priv, addr, buf, buf_len, 0);
+       priv->hwbus_ops->unlock(priv->hwbus_priv);
+       return ret;
+}
+
+int cw1200_data_read(struct cw1200_common *priv, void *buf, size_t buf_len)
+{
+       int ret, retry = 1;
+       int buf_id_rx = priv->buf_id_rx;
+
+       priv->hwbus_ops->lock(priv->hwbus_priv);
+
+       while (retry <= MAX_RETRY) {
+               ret = __cw1200_reg_read(priv,
+                                       ST90TDS_IN_OUT_QUEUE_REG_ID, buf,
+                                       buf_len, buf_id_rx + 1);
+               if (!ret) {
+                       buf_id_rx = (buf_id_rx + 1) & 3;
+                       priv->buf_id_rx = buf_id_rx;
+                       break;
+               } else {
+                       retry++;
+                       mdelay(1);
+                       pr_err("error :[%d]\n", ret);
+               }
+       }
+
+       priv->hwbus_ops->unlock(priv->hwbus_priv);
+       return ret;
+}
+
+int cw1200_data_write(struct cw1200_common *priv, const void *buf,
+                       size_t buf_len)
+{
+       int ret, retry = 1;
+       int buf_id_tx = priv->buf_id_tx;
+
+       priv->hwbus_ops->lock(priv->hwbus_priv);
+
+       while (retry <= MAX_RETRY) {
+               ret = __cw1200_reg_write(priv,
+                                        ST90TDS_IN_OUT_QUEUE_REG_ID, buf,
+                                        buf_len, buf_id_tx);
+               if (!ret) {
+                       buf_id_tx = (buf_id_tx + 1) & 31;
+                       priv->buf_id_tx = buf_id_tx;
+                       break;
+               } else {
+                       retry++;
+                       mdelay(1);
+                       pr_err("error :[%d]\n", ret);
+               }
+       }
+
+       priv->hwbus_ops->unlock(priv->hwbus_priv);
+       return ret;
+}
+
+int cw1200_indirect_read(struct cw1200_common *priv, u32 addr, void *buf,
+                        size_t buf_len, u32 prefetch, u16 port_addr)
+{
+       u32 val32 = 0;
+       int i, ret;
+
+       if ((buf_len / 2) >= 0x1000) {
+               pr_err("Can't read more than 0xfff words.\n");
+               return -EINVAL;
+       }
+
+       priv->hwbus_ops->lock(priv->hwbus_priv);
+       /* Write address */
+       ret = __cw1200_reg_write_32(priv, ST90TDS_SRAM_BASE_ADDR_REG_ID, addr);
+       if (ret < 0) {
+               pr_err("Can't write address register.\n");
+               goto out;
+       }
+
+       /* Read CONFIG Register Value - We will read 32 bits */
+       ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32);
+       if (ret < 0) {
+               pr_err("Can't read config register.\n");
+               goto out;
+       }
+
+       /* Set PREFETCH bit */
+       ret = __cw1200_reg_write_32(priv, ST90TDS_CONFIG_REG_ID,
+                                       val32 | prefetch);
+       if (ret < 0) {
+               pr_err("Can't write prefetch bit.\n");
+               goto out;
+       }
+
+       /* Check for PRE-FETCH bit to be cleared */
+       for (i = 0; i < 20; i++) {
+               ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32);
+               if (ret < 0) {
+                       pr_err("Can't check prefetch bit.\n");
+                       goto out;
+               }
+               if (!(val32 & prefetch))
+                       break;
+
+               mdelay(i);
+       }
+
+       if (val32 & prefetch) {
+               pr_err("Prefetch bit is not cleared.\n");
+               goto out;
+       }
+
+       /* Read data port */
+       ret = __cw1200_reg_read(priv, port_addr, buf, buf_len, 0);
+       if (ret < 0) {
+               pr_err("Can't read data port.\n");
+               goto out;
+       }
+
+out:
+       priv->hwbus_ops->unlock(priv->hwbus_priv);
+       return ret;
+}
+
+int cw1200_apb_write(struct cw1200_common *priv, u32 addr, const void *buf,
+                       size_t buf_len)
+{
+       int ret;
+
+       if ((buf_len / 2) >= 0x1000) {
+               pr_err("Can't write more than 0xfff words.\n");
+               return -EINVAL;
+       }
+
+       priv->hwbus_ops->lock(priv->hwbus_priv);
+
+       /* Write address */
+       ret = __cw1200_reg_write_32(priv, ST90TDS_SRAM_BASE_ADDR_REG_ID, addr);
+       if (ret < 0) {
+               pr_err("Can't write address register.\n");
+               goto out;
+       }
+
+       /* Write data port */
+       ret = __cw1200_reg_write(priv, ST90TDS_SRAM_DPORT_REG_ID,
+                                       buf, buf_len, 0);
+       if (ret < 0) {
+               pr_err("Can't write data port.\n");
+               goto out;
+       }
+
+out:
+       priv->hwbus_ops->unlock(priv->hwbus_priv);
+       return ret;
+}
+
+int __cw1200_irq_enable(struct cw1200_common *priv, int enable)
+{
+       u32 val32;
+       u16 val16;
+       int ret;
+
+       if (HIF_8601_SILICON == priv->hw_type) {
+               ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32);
+               if (ret < 0) {
+                       pr_err("Can't read config register.\n");
+                       return ret;
+               }
+
+               if (enable)
+                       val32 |= ST90TDS_CONF_IRQ_RDY_ENABLE;
+               else
+                       val32 &= ~ST90TDS_CONF_IRQ_RDY_ENABLE;
+
+               ret = __cw1200_reg_write_32(priv, ST90TDS_CONFIG_REG_ID, val32);
+               if (ret < 0) {
+                       pr_err("Can't write config register.\n");
+                       return ret;
+               }
+       } else {
+               ret = __cw1200_reg_read_16(priv, ST90TDS_CONFIG_REG_ID, &val16);
+               if (ret < 0) {
+                       pr_err("Can't read control register.\n");
+                       return ret;
+               }
+
+               if (enable)
+                       val16 |= ST90TDS_CONT_IRQ_RDY_ENABLE;
+               else
+                       val16 &= ~ST90TDS_CONT_IRQ_RDY_ENABLE;
+
+               ret = __cw1200_reg_write_16(priv, ST90TDS_CONFIG_REG_ID, val16);
+               if (ret < 0) {
+                       pr_err("Can't write control register.\n");
+                       return ret;
+               }
+       }
+       return 0;
+}
diff --git a/drivers/net/wireless/cw1200/hwio.h b/drivers/net/wireless/cw1200/hwio.h
new file mode 100644 (file)
index 0000000..ddf5266
--- /dev/null
@@ -0,0 +1,247 @@
+/*
+ * Low-level API for mac80211 ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * Based on:
+ * ST-Ericsson UMAC CW1200 driver which is
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Ajitpal Singh <ajitpal.singh@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef CW1200_HWIO_H_INCLUDED
+#define CW1200_HWIO_H_INCLUDED
+
+/* extern */ struct cw1200_common;
+
+#define CW1200_CUT_11_ID_STR           (0x302E3830)
+#define CW1200_CUT_22_ID_STR1          (0x302e3132)
+#define CW1200_CUT_22_ID_STR2          (0x32302e30)
+#define CW1200_CUT_22_ID_STR3          (0x3335)
+#define CW1200_CUT_ID_ADDR             (0xFFF17F90)
+#define CW1200_CUT2_ID_ADDR            (0xFFF1FF90)
+
+/* Download control area */
+/* boot loader start address in SRAM */
+#define DOWNLOAD_BOOT_LOADER_OFFSET    (0x00000000)
+/* 32K, 0x4000 to 0xDFFF */
+#define DOWNLOAD_FIFO_OFFSET           (0x00004000)
+/* 32K */
+#define DOWNLOAD_FIFO_SIZE             (0x00008000)
+/* 128 bytes, 0xFF80 to 0xFFFF */
+#define DOWNLOAD_CTRL_OFFSET           (0x0000FF80)
+#define DOWNLOAD_CTRL_DATA_DWORDS      (32-6)
+
+struct download_cntl_t {
+       /* size of whole firmware file (including checksum), host init */
+       u32 image_size;
+       /* downloading flags */
+       u32 flags;
+       /* No. of bytes put into the download, init & updated by host */
+       u32 put;
+       /* last traced program counter, last ARM reg_pc */
+       u32 trace_pc;
+       /* No. of bytes read from the download, host init, device updates */
+       u32 get;
+       /* r0, boot loader status, host init to pending, device updates */
+       u32 status;
+       /* Extra debug info, r1 to r14 if status=r0=DOWNLOAD_EXCEPTION */
+       u32 debug_data[DOWNLOAD_CTRL_DATA_DWORDS];
+};
+
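+/* Each register below addresses one download_cntl_t field inside
+ * the control area at DOWNLOAD_CTRL_OFFSET.
+ */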
+#define        DOWNLOAD_IMAGE_SIZE_REG         \
+       (DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, image_size))
+#define        DOWNLOAD_FLAGS_REG              \
+       (DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, flags))
+#define DOWNLOAD_PUT_REG               \
+       (DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, put))
+#define DOWNLOAD_TRACE_PC_REG          \
+       (DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, trace_pc))
+#define        DOWNLOAD_GET_REG                \
+       (DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, get))
+#define        DOWNLOAD_STATUS_REG             \
+       (DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, status))
+#define DOWNLOAD_DEBUG_DATA_REG                \
+       (DOWNLOAD_CTRL_OFFSET + offsetof(struct download_cntl_t, debug_data))
+#define DOWNLOAD_DEBUG_DATA_LEN                (108)
+
+#define DOWNLOAD_BLOCK_SIZE            (1024)
+
+/* For boot loader detection */
+#define DOWNLOAD_ARE_YOU_HERE          (0x87654321)
+#define DOWNLOAD_I_AM_HERE             (0x12345678)
+
+/* Download error code */
+#define DOWNLOAD_PENDING               (0xFFFFFFFF)
+#define DOWNLOAD_SUCCESS               (0)
+#define DOWNLOAD_EXCEPTION             (1)
+#define DOWNLOAD_ERR_MEM_1             (2)
+#define DOWNLOAD_ERR_MEM_2             (3)
+#define DOWNLOAD_ERR_SOFTWARE          (4)
+#define DOWNLOAD_ERR_FILE_SIZE         (5)
+#define DOWNLOAD_ERR_CHECKSUM          (6)
+#define DOWNLOAD_ERR_OVERFLOW          (7)
+#define DOWNLOAD_ERR_IMAGE             (8)
+#define DOWNLOAD_ERR_HOST              (9)
+#define DOWNLOAD_ERR_ABORT             (10)
+
+
+#define SYS_BASE_ADDR_SILICON          (0)
+#define PAC_BASE_ADDRESS_SILICON       (SYS_BASE_ADDR_SILICON + 0x09000000)
+#define PAC_SHARED_MEMORY_SILICON      (PAC_BASE_ADDRESS_SILICON)
+
+#define CW1200_APB(addr)               (PAC_SHARED_MEMORY_SILICON + (addr))
+
+/* Device register definitions */
+
+/* WBF - SPI Register Addresses */
+#define ST90TDS_ADDR_ID_BASE           (0x0000)
+/* 16/32 bits */
+#define ST90TDS_CONFIG_REG_ID          (0x0000)
+/* 16/32 bits */
+#define ST90TDS_CONTROL_REG_ID         (0x0001)
+/* 16 bits, Q mode W/R */
+#define ST90TDS_IN_OUT_QUEUE_REG_ID    (0x0002)
+/* 32 bits, AHB bus R/W */
+#define ST90TDS_AHB_DPORT_REG_ID       (0x0003)
+/* 16/32 bits */
+#define ST90TDS_SRAM_BASE_ADDR_REG_ID   (0x0004)
+/* 32 bits, APB bus R/W */
+#define ST90TDS_SRAM_DPORT_REG_ID      (0x0005)
+/* 32 bits, t_settle/general */
+#define ST90TDS_TSET_GEN_R_W_REG_ID    (0x0006)
+/* 16 bits, Q mode read, no length */
+#define ST90TDS_FRAME_OUT_REG_ID       (0x0007)
+#define ST90TDS_ADDR_ID_MAX            (ST90TDS_FRAME_OUT_REG_ID)
+
+/* WBF - Control register bit set */
+/* next o/p length, bit 11 to 0 */
+#define ST90TDS_CONT_NEXT_LEN_MASK     (0x0FFF)
+#define ST90TDS_CONT_WUP_BIT           (BIT(12))
+#define ST90TDS_CONT_RDY_BIT           (BIT(13))
+#define ST90TDS_CONT_IRQ_ENABLE                (BIT(14))
+#define ST90TDS_CONT_RDY_ENABLE                (BIT(15))
+#define ST90TDS_CONT_IRQ_RDY_ENABLE    (BIT(14)|BIT(15))
+
+/* SPI Config register bit set */
+#define ST90TDS_CONFIG_FRAME_BIT       (BIT(2))
+#define ST90TDS_CONFIG_WORD_MODE_BITS  (BIT(3)|BIT(4))
+#define ST90TDS_CONFIG_WORD_MODE_1     (BIT(3))
+#define ST90TDS_CONFIG_WORD_MODE_2     (BIT(4))
+#define ST90TDS_CONFIG_ERROR_0_BIT     (BIT(5))
+#define ST90TDS_CONFIG_ERROR_1_BIT     (BIT(6))
+#define ST90TDS_CONFIG_ERROR_2_BIT     (BIT(7))
+/* TBD: Sure??? */
+#define ST90TDS_CONFIG_CSN_FRAME_BIT   (BIT(7))
+#define ST90TDS_CONFIG_ERROR_3_BIT     (BIT(8))
+#define ST90TDS_CONFIG_ERROR_4_BIT     (BIT(9))
+/* QueueM */
+#define ST90TDS_CONFIG_ACCESS_MODE_BIT (BIT(10))
+/* AHB bus */
+#define ST90TDS_CONFIG_AHB_PRFETCH_BIT (BIT(11))
+#define ST90TDS_CONFIG_CPU_CLK_DIS_BIT (BIT(12))
+/* APB bus */
+#define ST90TDS_CONFIG_PRFETCH_BIT     (BIT(13))
+/* cpu reset */
+#define ST90TDS_CONFIG_CPU_RESET_BIT   (BIT(14))
+#define ST90TDS_CONFIG_CLEAR_INT_BIT   (BIT(15))
+
+/* For CW1200 the IRQ Enable and Ready Bits are in CONFIG register */
+#define ST90TDS_CONF_IRQ_ENABLE                (BIT(16))
+#define ST90TDS_CONF_RDY_ENABLE                (BIT(17))
+#define ST90TDS_CONF_IRQ_RDY_ENABLE    (BIT(16)|BIT(17))
+
+int cw1200_data_read(struct cw1200_common *priv,
+                    void *buf, size_t buf_len);
+int cw1200_data_write(struct cw1200_common *priv,
+                     const void *buf, size_t buf_len);
+
+int cw1200_reg_read(struct cw1200_common *priv, u16 addr,
+                   void *buf, size_t buf_len);
+int cw1200_reg_write(struct cw1200_common *priv, u16 addr,
+                    const void *buf, size_t buf_len);
+
+static inline int cw1200_reg_read_16(struct cw1200_common *priv,
+                                    u16 addr, u16 *val)
+{
+       __le32 tmp;
+       int i;
+       i = cw1200_reg_read(priv, addr, &tmp, sizeof(tmp));
+       *val = le32_to_cpu(tmp) & 0xfffff;
+       return i;
+}
+
+static inline int cw1200_reg_write_16(struct cw1200_common *priv,
+                                     u16 addr, u16 val)
+{
+       __le32 tmp = cpu_to_le32((u32)val);
+       return cw1200_reg_write(priv, addr, &tmp, sizeof(tmp));
+}
+
+static inline int cw1200_reg_read_32(struct cw1200_common *priv,
+                                    u16 addr, u32 *val)
+{
+       __le32 tmp;
+       int i = cw1200_reg_read(priv, addr, &tmp, sizeof(tmp));
+       *val = le32_to_cpu(tmp);
+       return i;
+}
+
+static inline int cw1200_reg_write_32(struct cw1200_common *priv,
+                                     u16 addr, u32 val)
+{
+       __le32 tmp = cpu_to_le32(val);
+       return cw1200_reg_write(priv, addr, &tmp, sizeof(val));
+}
+
+int cw1200_indirect_read(struct cw1200_common *priv, u32 addr, void *buf,
+                        size_t buf_len, u32 prefetch, u16 port_addr);
+int cw1200_apb_write(struct cw1200_common *priv, u32 addr, const void *buf,
+                    size_t buf_len);
+
+static inline int cw1200_apb_read(struct cw1200_common *priv, u32 addr,
+                                 void *buf, size_t buf_len)
+{
+       return cw1200_indirect_read(priv, addr, buf, buf_len,
+                                   ST90TDS_CONFIG_PRFETCH_BIT,
+                                   ST90TDS_SRAM_DPORT_REG_ID);
+}
+
+static inline int cw1200_ahb_read(struct cw1200_common *priv, u32 addr,
+                                 void *buf, size_t buf_len)
+{
+       return cw1200_indirect_read(priv, addr, buf, buf_len,
+                                   ST90TDS_CONFIG_AHB_PRFETCH_BIT,
+                                   ST90TDS_AHB_DPORT_REG_ID);
+}
+
+static inline int cw1200_apb_read_32(struct cw1200_common *priv,
+                                    u32 addr, u32 *val)
+{
+       __le32 tmp;
+       int i = cw1200_apb_read(priv, addr, &tmp, sizeof(tmp));
+       *val = le32_to_cpu(tmp);
+       return i;
+}
+
+static inline int cw1200_apb_write_32(struct cw1200_common *priv,
+                                     u32 addr, u32 val)
+{
+       __le32 tmp = cpu_to_le32(val);
+       return cw1200_apb_write(priv, addr, &tmp, sizeof(val));
+}
+
+static inline int cw1200_ahb_read_32(struct cw1200_common *priv,
+                                    u32 addr, u32 *val)
+{
+       __le32 tmp;
+       int i = cw1200_ahb_read(priv, addr, &tmp, sizeof(tmp));
+       *val = le32_to_cpu(tmp);
+       return i;
+}
+
+#endif /* CW1200_HWIO_H_INCLUDED */
diff --git a/drivers/net/wireless/cw1200/main.c b/drivers/net/wireless/cw1200/main.c
new file mode 100644 (file)
index 0000000..3724e73
--- /dev/null
@@ -0,0 +1,605 @@
+/*
+ * mac80211 glue code for mac80211 ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * Based on:
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright (c) 2007-2009, Christian Lamparter <chunkeey@web.de>
+ * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
+ *
+ * Based on:
+ * - the islsm (softmac prism54) driver, which is:
+ *   Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al.
+ * - stlc45xx driver
+ *   Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies).
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/firmware.h>
+#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+#include <net/mac80211.h>
+
+#include "cw1200.h"
+#include "txrx.h"
+#include "hwbus.h"
+#include "fwio.h"
+#include "hwio.h"
+#include "bh.h"
+#include "sta.h"
+#include "scan.h"
+#include "debug.h"
+#include "pm.h"
+
+MODULE_AUTHOR("Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>");
+MODULE_DESCRIPTION("Softmac ST-Ericsson CW1200 common code");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("cw1200_core");
+
+/* Accept MAC address of the form macaddr=0x00,0x80,0xE1,0x30,0x40,0x50 */
+static u8 cw1200_mac_template[ETH_ALEN] = {0x02, 0x80, 0xe1, 0x00, 0x00, 0x00};
+module_param_array_named(macaddr, cw1200_mac_template, byte, NULL, S_IRUGO);
+MODULE_PARM_DESC(macaddr, "Override platform_data MAC address");
+
+static char *cw1200_sdd_path;
+module_param(cw1200_sdd_path, charp, 0644);
+MODULE_PARM_DESC(cw1200_sdd_path, "Override platform_data SDD file");
+static int cw1200_refclk;
+module_param(cw1200_refclk, int, 0644);
+MODULE_PARM_DESC(cw1200_refclk, "Override platform_data reference clock");
+
+int cw1200_power_mode = wsm_power_mode_quiescent;
+module_param(cw1200_power_mode, int, 0644);
+MODULE_PARM_DESC(cw1200_power_mode, "WSM power mode.  0 == active, 1 == doze, 2 == quiescent (default)");
+
+#define RATETAB_ENT(_rate, _rateid, _flags)            \
+       {                                               \
+               .bitrate        = (_rate),              \
+               .hw_value       = (_rateid),            \
+               .flags          = (_flags),             \
+       }
+
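+/* .bitrate is given in units of 100 kbps, so 10 below means 1 Mbit/s */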
+static struct ieee80211_rate cw1200_rates[] = {
+       RATETAB_ENT(10,  0,   0),
+       RATETAB_ENT(20,  1,   0),
+       RATETAB_ENT(55,  2,   0),
+       RATETAB_ENT(110, 3,   0),
+       RATETAB_ENT(60,  6,  0),
+       RATETAB_ENT(90,  7,  0),
+       RATETAB_ENT(120, 8,  0),
+       RATETAB_ENT(180, 9,  0),
+       RATETAB_ENT(240, 10, 0),
+       RATETAB_ENT(360, 11, 0),
+       RATETAB_ENT(480, 12, 0),
+       RATETAB_ENT(540, 13, 0),
+};
+
+static struct ieee80211_rate cw1200_mcs_rates[] = {
+       RATETAB_ENT(65,  14, IEEE80211_TX_RC_MCS),
+       RATETAB_ENT(130, 15, IEEE80211_TX_RC_MCS),
+       RATETAB_ENT(195, 16, IEEE80211_TX_RC_MCS),
+       RATETAB_ENT(260, 17, IEEE80211_TX_RC_MCS),
+       RATETAB_ENT(390, 18, IEEE80211_TX_RC_MCS),
+       RATETAB_ENT(520, 19, IEEE80211_TX_RC_MCS),
+       RATETAB_ENT(585, 20, IEEE80211_TX_RC_MCS),
+       RATETAB_ENT(650, 21, IEEE80211_TX_RC_MCS),
+};
+
+#define cw1200_a_rates         (cw1200_rates + 4)
+#define cw1200_a_rates_size    (ARRAY_SIZE(cw1200_rates) - 4)
+#define cw1200_g_rates         (cw1200_rates + 0)
+#define cw1200_g_rates_size    (ARRAY_SIZE(cw1200_rates))
+#define cw1200_n_rates         (cw1200_mcs_rates)
+#define cw1200_n_rates_size    (ARRAY_SIZE(cw1200_mcs_rates))
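+
+/*
+ * The 5 GHz band reuses the OFDM subset of cw1200_rates (skipping the
+ * first four 11b entries), while 2.4 GHz uses the full table.
+ */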
+
+
+#define CHAN2G(_channel, _freq, _flags) {                      \
+       .band                   = IEEE80211_BAND_2GHZ,          \
+       .center_freq            = (_freq),                      \
+       .hw_value               = (_channel),                   \
+       .flags                  = (_flags),                     \
+       .max_antenna_gain       = 0,                            \
+       .max_power              = 30,                           \
+}
+
+#define CHAN5G(_channel, _flags) {                             \
+       .band                   = IEEE80211_BAND_5GHZ,          \
+       .center_freq    = 5000 + (5 * (_channel)),              \
+       .hw_value               = (_channel),                   \
+       .flags                  = (_flags),                     \
+       .max_antenna_gain       = 0,                            \
+       .max_power              = 30,                           \
+}
+
+static struct ieee80211_channel cw1200_2ghz_chantable[] = {
+       CHAN2G(1, 2412, 0),
+       CHAN2G(2, 2417, 0),
+       CHAN2G(3, 2422, 0),
+       CHAN2G(4, 2427, 0),
+       CHAN2G(5, 2432, 0),
+       CHAN2G(6, 2437, 0),
+       CHAN2G(7, 2442, 0),
+       CHAN2G(8, 2447, 0),
+       CHAN2G(9, 2452, 0),
+       CHAN2G(10, 2457, 0),
+       CHAN2G(11, 2462, 0),
+       CHAN2G(12, 2467, 0),
+       CHAN2G(13, 2472, 0),
+       CHAN2G(14, 2484, 0),
+};
+
+static struct ieee80211_channel cw1200_5ghz_chantable[] = {
+       CHAN5G(34, 0),          CHAN5G(36, 0),
+       CHAN5G(38, 0),          CHAN5G(40, 0),
+       CHAN5G(42, 0),          CHAN5G(44, 0),
+       CHAN5G(46, 0),          CHAN5G(48, 0),
+       CHAN5G(52, 0),          CHAN5G(56, 0),
+       CHAN5G(60, 0),          CHAN5G(64, 0),
+       CHAN5G(100, 0),         CHAN5G(104, 0),
+       CHAN5G(108, 0),         CHAN5G(112, 0),
+       CHAN5G(116, 0),         CHAN5G(120, 0),
+       CHAN5G(124, 0),         CHAN5G(128, 0),
+       CHAN5G(132, 0),         CHAN5G(136, 0),
+       CHAN5G(140, 0),         CHAN5G(149, 0),
+       CHAN5G(153, 0),         CHAN5G(157, 0),
+       CHAN5G(161, 0),         CHAN5G(165, 0),
+       CHAN5G(184, 0),         CHAN5G(188, 0),
+       CHAN5G(192, 0),         CHAN5G(196, 0),
+       CHAN5G(200, 0),         CHAN5G(204, 0),
+       CHAN5G(208, 0),         CHAN5G(212, 0),
+       CHAN5G(216, 0),
+};
+
+static struct ieee80211_supported_band cw1200_band_2ghz = {
+       .channels = cw1200_2ghz_chantable,
+       .n_channels = ARRAY_SIZE(cw1200_2ghz_chantable),
+       .bitrates = cw1200_g_rates,
+       .n_bitrates = cw1200_g_rates_size,
+       .ht_cap = {
+               .cap = IEEE80211_HT_CAP_GRN_FLD |
+                       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) |
+                       IEEE80211_HT_CAP_MAX_AMSDU,
+               .ht_supported = 1,
+               .ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K,
+               .ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE,
+               .mcs = {
+                       .rx_mask[0] = 0xFF,
+                       .rx_highest = __cpu_to_le16(0x41),
+                       .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+               },
+       },
+};
+
+static struct ieee80211_supported_band cw1200_band_5ghz = {
+       .channels = cw1200_5ghz_chantable,
+       .n_channels = ARRAY_SIZE(cw1200_5ghz_chantable),
+       .bitrates = cw1200_a_rates,
+       .n_bitrates = cw1200_a_rates_size,
+       .ht_cap = {
+               .cap = IEEE80211_HT_CAP_GRN_FLD |
+                       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) |
+                       IEEE80211_HT_CAP_MAX_AMSDU,
+               .ht_supported = 1,
+               .ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K,
+               .ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE,
+               .mcs = {
+                       .rx_mask[0] = 0xFF,
+                       .rx_highest = __cpu_to_le16(0x41),
+                       .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+               },
+       },
+};
+
+static const unsigned long cw1200_ttl[] = {
+       1 * HZ, /* VO */
+       2 * HZ, /* VI */
+       5 * HZ, /* BE */
+       10 * HZ /* BK */
+};
+
+static const struct ieee80211_ops cw1200_ops = {
+       .start                  = cw1200_start,
+       .stop                   = cw1200_stop,
+       .add_interface          = cw1200_add_interface,
+       .remove_interface       = cw1200_remove_interface,
+       .change_interface       = cw1200_change_interface,
+       .tx                     = cw1200_tx,
+       .hw_scan                = cw1200_hw_scan,
+       .set_tim                = cw1200_set_tim,
+       .sta_notify             = cw1200_sta_notify,
+       .sta_add                = cw1200_sta_add,
+       .sta_remove             = cw1200_sta_remove,
+       .set_key                = cw1200_set_key,
+       .set_rts_threshold      = cw1200_set_rts_threshold,
+       .config                 = cw1200_config,
+       .bss_info_changed       = cw1200_bss_info_changed,
+       .prepare_multicast      = cw1200_prepare_multicast,
+       .configure_filter       = cw1200_configure_filter,
+       .conf_tx                = cw1200_conf_tx,
+       .get_stats              = cw1200_get_stats,
+       .ampdu_action           = cw1200_ampdu_action,
+       .flush                  = cw1200_flush,
+#ifdef CONFIG_PM
+       .suspend                = cw1200_wow_suspend,
+       .resume                 = cw1200_wow_resume,
+#endif
+       /* Intentionally not offloaded:                                 */
+       /*.channel_switch       = cw1200_channel_switch,                */
+       /*.remain_on_channel    = cw1200_remain_on_channel,             */
+       /*.cancel_remain_on_channel = cw1200_cancel_remain_on_channel,  */
+};
+
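+/* Bitmasks of TIDs for which RX/TX block ACK is allowed; the default of -1
+ * is mapped to 0xFF (all TIDs) in cw1200_init_common().
+ */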
+static int cw1200_ba_rx_tids = -1;
+static int cw1200_ba_tx_tids = -1;
+module_param(cw1200_ba_rx_tids, int, 0644);
+module_param(cw1200_ba_tx_tids, int, 0644);
+MODULE_PARM_DESC(cw1200_ba_rx_tids, "Block ACK RX TIDs");
+MODULE_PARM_DESC(cw1200_ba_tx_tids, "Block ACK TX TIDs");
+
+#ifdef CONFIG_PM
+static const struct wiphy_wowlan_support cw1200_wowlan_support = {
+       /* Support only limited WoWLAN functionality */
+       .flags = WIPHY_WOWLAN_ANY | WIPHY_WOWLAN_DISCONNECT,
+};
+#endif
+
+
+static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr,
+                                               const bool have_5ghz)
+{
+       int i, band;
+       struct ieee80211_hw *hw;
+       struct cw1200_common *priv;
+
+       hw = ieee80211_alloc_hw(sizeof(struct cw1200_common), &cw1200_ops);
+       if (!hw)
+               return NULL;
+
+       priv = hw->priv;
+       priv->hw = hw;
+       priv->hw_type = -1;
+       priv->mode = NL80211_IFTYPE_UNSPECIFIED;
+       priv->rates = cw1200_rates; /* TODO: fetch from FW */
+       priv->mcs_rates = cw1200_n_rates;
+       if (cw1200_ba_rx_tids != -1)
+               priv->ba_rx_tid_mask = cw1200_ba_rx_tids;
+       else
+               priv->ba_rx_tid_mask = 0xFF; /* Enable RX BLKACK for all TIDs */
+       if (cw1200_ba_tx_tids != -1)
+               priv->ba_tx_tid_mask = cw1200_ba_tx_tids;
+       else
+               priv->ba_tx_tid_mask = 0xff; /* Enable TX BLKACK for all TIDs */
+
+       hw->flags = IEEE80211_HW_SIGNAL_DBM |
+                   IEEE80211_HW_SUPPORTS_PS |
+                   IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
+                   IEEE80211_HW_REPORTS_TX_ACK_STATUS |
+                   IEEE80211_HW_SUPPORTS_UAPSD |
+                   IEEE80211_HW_CONNECTION_MONITOR |
+                   IEEE80211_HW_AMPDU_AGGREGATION |
+                   IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
+                   IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC;
+
+       hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+                                         BIT(NL80211_IFTYPE_ADHOC) |
+                                         BIT(NL80211_IFTYPE_AP) |
+                                         BIT(NL80211_IFTYPE_MESH_POINT) |
+                                         BIT(NL80211_IFTYPE_P2P_CLIENT) |
+                                         BIT(NL80211_IFTYPE_P2P_GO);
+
+#ifdef CONFIG_PM
+       hw->wiphy->wowlan = &cw1200_wowlan_support;
+#endif
+
+       hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+
+       hw->channel_change_time = 1000; /* TODO: find actual value */
+       hw->queues = 4;
+
+       priv->rts_threshold = -1;
+
+       hw->max_rates = 8;
+       hw->max_rate_tries = 15;
+       hw->extra_tx_headroom = WSM_TX_EXTRA_HEADROOM +
+               8;  /* TKIP IV */
+
+       hw->sta_data_size = sizeof(struct cw1200_sta_priv);
+
+       hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &cw1200_band_2ghz;
+       if (have_5ghz)
+               hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &cw1200_band_5ghz;
+
+       /* Channel params have to be cleared before registering wiphy again */
+       for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+               struct ieee80211_supported_band *sband = hw->wiphy->bands[band];
+               if (!sband)
+                       continue;
+               for (i = 0; i < sband->n_channels; i++) {
+                       sband->channels[i].flags = 0;
+                       sband->channels[i].max_antenna_gain = 0;
+                       sband->channels[i].max_power = 30;
+               }
+       }
+
+       hw->wiphy->max_scan_ssids = 2;
+       hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
+
+       if (macaddr)
+               SET_IEEE80211_PERM_ADDR(hw, (u8 *)macaddr);
+       else
+               SET_IEEE80211_PERM_ADDR(hw, cw1200_mac_template);
+
+       /* Fix up the MAC address if necessary: a template address with all
+        * NIC-specific octets zero gets those octets randomized.
+        */
+       if (hw->wiphy->perm_addr[3] == 0 &&
+           hw->wiphy->perm_addr[4] == 0 &&
+           hw->wiphy->perm_addr[5] == 0) {
+               get_random_bytes(&hw->wiphy->perm_addr[3], 3);
+       }
+
+       mutex_init(&priv->wsm_cmd_mux);
+       mutex_init(&priv->conf_mutex);
+       priv->workqueue = create_singlethread_workqueue("cw1200_wq");
+       sema_init(&priv->scan.lock, 1);
+       INIT_WORK(&priv->scan.work, cw1200_scan_work);
+       INIT_DELAYED_WORK(&priv->scan.probe_work, cw1200_probe_work);
+       INIT_DELAYED_WORK(&priv->scan.timeout, cw1200_scan_timeout);
+       INIT_DELAYED_WORK(&priv->clear_recent_scan_work,
+                         cw1200_clear_recent_scan_work);
+       INIT_DELAYED_WORK(&priv->join_timeout, cw1200_join_timeout);
+       INIT_WORK(&priv->unjoin_work, cw1200_unjoin_work);
+       INIT_WORK(&priv->join_complete_work, cw1200_join_complete_work);
+       INIT_WORK(&priv->wep_key_work, cw1200_wep_key_work);
+       INIT_WORK(&priv->tx_policy_upload_work, tx_policy_upload_work);
+       spin_lock_init(&priv->event_queue_lock);
+       INIT_LIST_HEAD(&priv->event_queue);
+       INIT_WORK(&priv->event_handler, cw1200_event_handler);
+       INIT_DELAYED_WORK(&priv->bss_loss_work, cw1200_bss_loss_work);
+       INIT_WORK(&priv->bss_params_work, cw1200_bss_params_work);
+       spin_lock_init(&priv->bss_loss_lock);
+       spin_lock_init(&priv->ps_state_lock);
+       INIT_WORK(&priv->set_cts_work, cw1200_set_cts_work);
+       INIT_WORK(&priv->set_tim_work, cw1200_set_tim_work);
+       INIT_WORK(&priv->multicast_start_work, cw1200_multicast_start_work);
+       INIT_WORK(&priv->multicast_stop_work, cw1200_multicast_stop_work);
+       INIT_WORK(&priv->link_id_work, cw1200_link_id_work);
+       INIT_DELAYED_WORK(&priv->link_id_gc_work, cw1200_link_id_gc_work);
+       INIT_WORK(&priv->linkid_reset_work, cw1200_link_id_reset);
+       INIT_WORK(&priv->update_filtering_work, cw1200_update_filtering_work);
+       INIT_WORK(&priv->set_beacon_wakeup_period_work,
+                 cw1200_set_beacon_wakeup_period_work);
+       init_timer(&priv->mcast_timeout);
+       priv->mcast_timeout.data = (unsigned long)priv;
+       priv->mcast_timeout.function = cw1200_mcast_timeout;
+
+       if (cw1200_queue_stats_init(&priv->tx_queue_stats,
+                                   CW1200_LINK_ID_MAX,
+                                   cw1200_skb_dtor,
+                                   priv)) {
+               ieee80211_free_hw(hw);
+               return NULL;
+       }
+
+       for (i = 0; i < 4; ++i) {
+               if (cw1200_queue_init(&priv->tx_queue[i],
+                                     &priv->tx_queue_stats, i, 16,
+                                     cw1200_ttl[i])) {
+                       for (; i > 0; i--)
+                               cw1200_queue_deinit(&priv->tx_queue[i - 1]);
+                       cw1200_queue_stats_deinit(&priv->tx_queue_stats);
+                       ieee80211_free_hw(hw);
+                       return NULL;
+               }
+       }
+
+       init_waitqueue_head(&priv->channel_switch_done);
+       init_waitqueue_head(&priv->wsm_cmd_wq);
+       init_waitqueue_head(&priv->wsm_startup_done);
+       init_waitqueue_head(&priv->ps_mode_switch_done);
+       wsm_buf_init(&priv->wsm_cmd_buf);
+       spin_lock_init(&priv->wsm_cmd.lock);
+       priv->wsm_cmd.done = 1;
+       tx_policy_init(priv);
+
+       return hw;
+}
+
+static int cw1200_register_common(struct ieee80211_hw *dev)
+{
+       struct cw1200_common *priv = dev->priv;
+       int err;
+
+#ifdef CONFIG_PM
+       err = cw1200_pm_init(&priv->pm_state, priv);
+       if (err) {
+               pr_err("Cannot init PM. (%d).\n",
+                      err);
+               return err;
+       }
+#endif
+
+       err = ieee80211_register_hw(dev);
+       if (err) {
+               pr_err("Cannot register device (%d).\n",
+                      err);
+#ifdef CONFIG_PM
+               cw1200_pm_deinit(&priv->pm_state);
+#endif
+               return err;
+       }
+
+       cw1200_debug_init(priv);
+
+       pr_info("Registered as '%s'\n", wiphy_name(dev->wiphy));
+       return 0;
+}
+
+static void cw1200_free_common(struct ieee80211_hw *dev)
+{
+       ieee80211_free_hw(dev);
+}
+
+static void cw1200_unregister_common(struct ieee80211_hw *dev)
+{
+       struct cw1200_common *priv = dev->priv;
+       int i;
+
+       ieee80211_unregister_hw(dev);
+
+       del_timer_sync(&priv->mcast_timeout);
+       cw1200_unregister_bh(priv);
+
+       cw1200_debug_release(priv);
+
+       mutex_destroy(&priv->conf_mutex);
+
+       wsm_buf_deinit(&priv->wsm_cmd_buf);
+
+       destroy_workqueue(priv->workqueue);
+       priv->workqueue = NULL;
+
+       if (priv->sdd) {
+               release_firmware(priv->sdd);
+               priv->sdd = NULL;
+       }
+
+       for (i = 0; i < 4; ++i)
+               cw1200_queue_deinit(&priv->tx_queue[i]);
+
+       cw1200_queue_stats_deinit(&priv->tx_queue_stats);
+#ifdef CONFIG_PM
+       cw1200_pm_deinit(&priv->pm_state);
+#endif
+}
+
+/* Map the reference clock frequency (in kHz) to the DPLL configuration word. */
+u32 cw1200_dpll_from_clk(u16 clk_khz)
+{
+       switch (clk_khz) {
+       case 0x32C8: /* 13000 KHz */
+               return 0x1D89D241;
+       case 0x3E80: /* 16000 KHz */
+               return 0x000001E1;
+       case 0x41A0: /* 16800 KHz */
+               return 0x124931C1;
+       case 0x4B00: /* 19200 KHz */
+               return 0x00000191;
+       case 0x5DC0: /* 24000 KHz */
+               return 0x00000141;
+       case 0x6590: /* 26000 KHz */
+               return 0x0EC4F121;
+       case 0x8340: /* 33600 KHz */
+               return 0x092490E1;
+       case 0x9600: /* 38400 KHz */
+               return 0x100010C1;
+       case 0x9C40: /* 40000 KHz */
+               return 0x000000C1;
+       case 0xBB80: /* 48000 KHz */
+               return 0x000000A1;
+       case 0xCB20: /* 52000 KHz */
+               return 0x07627091;
+       default:
+               pr_err("Unknown Refclk freq (0x%04x), using 26000 KHz\n",
+                      clk_khz);
+               return 0x0EC4F121;
+       }
+}
+
+int cw1200_core_probe(const struct hwbus_ops *hwbus_ops,
+                     struct hwbus_priv *hwbus,
+                     struct device *pdev,
+                     struct cw1200_common **core,
+                     int ref_clk, const u8 *macaddr,
+                     const char *sdd_path, bool have_5ghz)
+{
+       int err = -EINVAL;
+       struct ieee80211_hw *dev;
+       struct cw1200_common *priv;
+       struct wsm_operational_mode mode = {
+               .power_mode = cw1200_power_mode,
+               .disable_more_flag_usage = true,
+       };
+
+       dev = cw1200_init_common(macaddr, have_5ghz);
+       if (!dev)
+               goto err;
+
+       priv = dev->priv;
+       priv->hw_refclk = ref_clk;
+       if (cw1200_refclk)
+               priv->hw_refclk = cw1200_refclk;
+
+       priv->sdd_path = (char *)sdd_path;
+       if (cw1200_sdd_path)
+               priv->sdd_path = cw1200_sdd_path;
+
+       priv->hwbus_ops = hwbus_ops;
+       priv->hwbus_priv = hwbus;
+       priv->pdev = pdev;
+       SET_IEEE80211_DEV(priv->hw, pdev);
+
+       /* Pass struct cw1200_common back up */
+       *core = priv;
+
+       err = cw1200_register_bh(priv);
+       if (err)
+               goto err1;
+
+       err = cw1200_load_firmware(priv);
+       if (err)
+               goto err2;
+
+       if (wait_event_interruptible_timeout(priv->wsm_startup_done,
+                                            priv->firmware_ready,
+                                            3*HZ) <= 0) {
+               /* TODO: Need to find out how to reset the device
+                * in QUEUE mode properly.
+                */
+               pr_err("Timeout waiting on device startup\n");
+               err = -ETIMEDOUT;
+               goto err2;
+       }
+
+       /* Set low-power mode. */
+       wsm_set_operational_mode(priv, &mode);
+
+       /* Enable multi-TX confirmation */
+       wsm_use_multi_tx_conf(priv, true);
+
+       err = cw1200_register_common(dev);
+       if (err)
+               goto err2;
+
+       return err;
+
+err2:
+       cw1200_unregister_bh(priv);
+err1:
+       cw1200_free_common(dev);
+err:
+       *core = NULL;
+       return err;
+}
+EXPORT_SYMBOL_GPL(cw1200_core_probe);
+
+void cw1200_core_release(struct cw1200_common *self)
+{
+       /* Disable device interrupts */
+       self->hwbus_ops->lock(self->hwbus_priv);
+       __cw1200_irq_enable(self, 0);
+       self->hwbus_ops->unlock(self->hwbus_priv);
+
+       /* And then clean up */
+       cw1200_unregister_common(self->hw);
+       cw1200_free_common(self->hw);
+       return;
+}
+EXPORT_SYMBOL_GPL(cw1200_core_release);
diff --git a/drivers/net/wireless/cw1200/pm.c b/drivers/net/wireless/cw1200/pm.c
new file mode 100644 (file)
index 0000000..b37abb9
--- /dev/null
@@ -0,0 +1,367 @@
+/*
+ * Mac80211 power management API for ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2011, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/if_ether.h>
+#include "cw1200.h"
+#include "pm.h"
+#include "sta.h"
+#include "bh.h"
+#include "hwbus.h"
+
+#define CW1200_BEACON_SKIPPING_MULTIPLIER 3
+
+struct cw1200_udp_port_filter {
+       struct wsm_udp_port_filter_hdr hdr;
+       /* Up to 4 filters are allowed. */
+       struct wsm_udp_port_filter filters[WSM_MAX_FILTER_ELEMENTS];
+} __packed;
+
+struct cw1200_ether_type_filter {
+       struct wsm_ether_type_filter_hdr hdr;
+       /* Up to 4 filters are allowed. */
+       struct wsm_ether_type_filter filters[WSM_MAX_FILTER_ELEMENTS];
+} __packed;
+
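+/* Applied on WoW suspend: filter out UDP frames whose destination port is
+ * the DHCP server (67) or client (68) port.
+ */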
+static struct cw1200_udp_port_filter cw1200_udp_port_filter_on = {
+       .hdr.num = 2,
+       .filters = {
+               [0] = {
+                       .action = WSM_FILTER_ACTION_FILTER_OUT,
+                       .type = WSM_FILTER_PORT_TYPE_DST,
+                       .port = __cpu_to_le16(67), /* DHCP Bootps */
+               },
+               [1] = {
+                       .action = WSM_FILTER_ACTION_FILTER_OUT,
+                       .type = WSM_FILTER_PORT_TYPE_DST,
+                       .port = __cpu_to_le16(68), /* DHCP Bootpc */
+               },
+       }
+};
+
+static struct wsm_udp_port_filter_hdr cw1200_udp_port_filter_off = {
+       .num = 0,
+};
+
+#ifndef ETH_P_WAPI
+#define ETH_P_WAPI     0x88B4
+#endif
+
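+/* Applied on WoW suspend: pass only IP, PAE (EAPOL), WAPI and ARP
+ * ethertypes.
+ */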
+static struct cw1200_ether_type_filter cw1200_ether_type_filter_on = {
+       .hdr.num = 4,
+       .filters = {
+               [0] = {
+                       .action = WSM_FILTER_ACTION_FILTER_IN,
+                       .type = __cpu_to_le16(ETH_P_IP),
+               },
+               [1] = {
+                       .action = WSM_FILTER_ACTION_FILTER_IN,
+                       .type = __cpu_to_le16(ETH_P_PAE),
+               },
+               [2] = {
+                       .action = WSM_FILTER_ACTION_FILTER_IN,
+                       .type = __cpu_to_le16(ETH_P_WAPI),
+               },
+               [3] = {
+                       .action = WSM_FILTER_ACTION_FILTER_IN,
+                       .type = __cpu_to_le16(ETH_P_ARP),
+               },
+       },
+};
+
+static struct wsm_ether_type_filter_hdr cw1200_ether_type_filter_off = {
+       .num = 0,
+};
+
+/* private */
+struct cw1200_suspend_state {
+       unsigned long bss_loss_tmo;
+       unsigned long join_tmo;
+       unsigned long direct_probe;
+       unsigned long link_id_gc;
+       bool beacon_skipping;
+       u8 prev_ps_mode;
+};
+
+static void cw1200_pm_stay_awake_tmo(unsigned long arg)
+{
+       /* XXX what's the point of this? */
+}
+
+int cw1200_pm_init(struct cw1200_pm_state *pm,
+                  struct cw1200_common *priv)
+{
+       spin_lock_init(&pm->lock);
+
+       init_timer(&pm->stay_awake);
+       pm->stay_awake.data = (unsigned long)pm;
+       pm->stay_awake.function = cw1200_pm_stay_awake_tmo;
+
+       return 0;
+}
+
+void cw1200_pm_deinit(struct cw1200_pm_state *pm)
+{
+       del_timer_sync(&pm->stay_awake);
+}
+
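+/* Extend the stay-awake timer; a pending timeout is never shortened. */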
+void cw1200_pm_stay_awake(struct cw1200_pm_state *pm,
+                         unsigned long tmo)
+{
+       long cur_tmo;
+       spin_lock_bh(&pm->lock);
+       cur_tmo = pm->stay_awake.expires - jiffies;
+       if (!timer_pending(&pm->stay_awake) || cur_tmo < (long)tmo)
+               mod_timer(&pm->stay_awake, jiffies + tmo);
+       spin_unlock_bh(&pm->lock);
+}
+
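+/* Cancel a delayed work item and return its remaining timeout in jiffies,
+ * or -1 if it was not pending, so the work can be re-armed on resume.
+ */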
+static long cw1200_suspend_work(struct delayed_work *work)
+{
+       int ret = cancel_delayed_work(work);
+       long tmo;
+       if (ret > 0) {
+               /* Timer is pending */
+               tmo = work->timer.expires - jiffies;
+               if (tmo < 0)
+                       tmo = 0;
+       } else {
+               tmo = -1;
+       }
+       return tmo;
+}
+
+static int cw1200_resume_work(struct cw1200_common *priv,
+                              struct delayed_work *work,
+                              unsigned long tmo)
+{
+       if ((long)tmo < 0)
+               return 1;
+
+       return queue_delayed_work(priv->workqueue, work, tmo);
+}
+
+int cw1200_can_suspend(struct cw1200_common *priv)
+{
+       if (atomic_read(&priv->bh_rx)) {
+               wiphy_dbg(priv->hw->wiphy, "Suspend interrupted.\n");
+               return 0;
+       }
+       return 1;
+}
+EXPORT_SYMBOL_GPL(cw1200_can_suspend);
+
+int cw1200_wow_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+{
+       struct cw1200_common *priv = hw->priv;
+       struct cw1200_pm_state *pm_state = &priv->pm_state;
+       struct cw1200_suspend_state *state;
+       int ret;
+
+       spin_lock_bh(&pm_state->lock);
+       ret = timer_pending(&pm_state->stay_awake);
+       spin_unlock_bh(&pm_state->lock);
+       if (ret)
+               return -EAGAIN;
+
+       /* Do not suspend when datapath is not idle */
+       if (priv->tx_queue_stats.num_queued)
+               return -EBUSY;
+
+       /* Make sure there is no configuration requests in progress. */
+       if (!mutex_trylock(&priv->conf_mutex))
+               return -EBUSY;
+
+       /* Ensure pending operations are done.
+        * Note also that wow_suspend must return in ~2.5sec, before
+        * watchdog is triggered.
+        */
+       if (priv->channel_switch_in_progress)
+               goto revert1;
+
+       /* Do not suspend when join is pending */
+       if (priv->join_pending)
+               goto revert1;
+
+       /* Do not suspend when scanning */
+       if (down_trylock(&priv->scan.lock))
+               goto revert1;
+
+       /* Lock TX. */
+       wsm_lock_tx_async(priv);
+
+       /* Wait to avoid possible race with bh code.
+        * But do not wait too long...
+        */
+       if (wait_event_timeout(priv->bh_evt_wq,
+                              !priv->hw_bufs_used, HZ / 10) <= 0)
+               goto revert2;
+
+       /* Set UDP filter */
+       wsm_set_udp_port_filter(priv, &cw1200_udp_port_filter_on.hdr);
+
+       /* Set ethernet frame type filter */
+       wsm_set_ether_type_filter(priv, &cw1200_ether_type_filter_on.hdr);
+
+       /* Allocate state */
+       state = kzalloc(sizeof(struct cw1200_suspend_state), GFP_KERNEL);
+       if (!state)
+               goto revert3;
+
+       /* Change to legacy PS while going to suspend */
+       if (!priv->vif->p2p &&
+           priv->join_status == CW1200_JOIN_STATUS_STA &&
+           priv->powersave_mode.mode != WSM_PSM_PS) {
+               state->prev_ps_mode = priv->powersave_mode.mode;
+               priv->powersave_mode.mode = WSM_PSM_PS;
+               cw1200_set_pm(priv, &priv->powersave_mode);
+               if (wait_event_interruptible_timeout(priv->ps_mode_switch_done,
+                                                    !priv->ps_mode_switch_in_progress, 1*HZ) <= 0) {
+                       /* Don't leak the suspend state on this error path */
+                       kfree(state);
+                       goto revert3;
+               }
+       }
+
+       /* Store delayed work states. */
+       state->bss_loss_tmo =
+               cw1200_suspend_work(&priv->bss_loss_work);
+       state->join_tmo =
+               cw1200_suspend_work(&priv->join_timeout);
+       state->direct_probe =
+               cw1200_suspend_work(&priv->scan.probe_work);
+       state->link_id_gc =
+               cw1200_suspend_work(&priv->link_id_gc_work);
+
+       cancel_delayed_work_sync(&priv->clear_recent_scan_work);
+       atomic_set(&priv->recent_scan, 0);
+
+       /* Enable beacon skipping */
+       if (priv->join_status == CW1200_JOIN_STATUS_STA &&
+           priv->join_dtim_period &&
+           !priv->has_multicast_subscription) {
+               state->beacon_skipping = true;
+               wsm_set_beacon_wakeup_period(priv,
+                                            priv->join_dtim_period,
+                                            CW1200_BEACON_SKIPPING_MULTIPLIER * priv->join_dtim_period);
+       }
+
+       /* Stop serving thread */
+       if (cw1200_bh_suspend(priv))
+               goto revert4;
+
+       ret = timer_pending(&priv->mcast_timeout);
+       if (ret)
+               goto revert5;
+
+       /* Store suspend state */
+       pm_state->suspend_state = state;
+
+       /* Enable IRQ wake */
+       ret = priv->hwbus_ops->power_mgmt(priv->hwbus_priv, true);
+       if (ret) {
+               wiphy_err(priv->hw->wiphy,
+                         "PM request failed: %d. WoW is disabled.\n", ret);
+               cw1200_wow_resume(hw);
+               return -EBUSY;
+       }
+
+       /* Force resume if event is coming from the device. */
+       if (atomic_read(&priv->bh_rx)) {
+               cw1200_wow_resume(hw);
+               return -EAGAIN;
+       }
+
+       return 0;
+
+revert5:
+       WARN_ON(cw1200_bh_resume(priv));
+revert4:
+       cw1200_resume_work(priv, &priv->bss_loss_work,
+                          state->bss_loss_tmo);
+       cw1200_resume_work(priv, &priv->join_timeout,
+                          state->join_tmo);
+       cw1200_resume_work(priv, &priv->scan.probe_work,
+                          state->direct_probe);
+       cw1200_resume_work(priv, &priv->link_id_gc_work,
+                          state->link_id_gc);
+       kfree(state);
+revert3:
+       wsm_set_udp_port_filter(priv, &cw1200_udp_port_filter_off);
+       wsm_set_ether_type_filter(priv, &cw1200_ether_type_filter_off);
+revert2:
+       wsm_unlock_tx(priv);
+       up(&priv->scan.lock);
+revert1:
+       mutex_unlock(&priv->conf_mutex);
+       return -EBUSY;
+}
+
+int cw1200_wow_resume(struct ieee80211_hw *hw)
+{
+       struct cw1200_common *priv = hw->priv;
+       struct cw1200_pm_state *pm_state = &priv->pm_state;
+       struct cw1200_suspend_state *state;
+
+       state = pm_state->suspend_state;
+       pm_state->suspend_state = NULL;
+
+       /* Disable IRQ wake */
+       priv->hwbus_ops->power_mgmt(priv->hwbus_priv, false);
+
+       /* The scan lock must be released before the BH thread is resumed;
+        * otherwise, if a BSS_LOST command arrives, its processing
+        * will be delayed.
+        */
+       up(&priv->scan.lock);
+
+       /* Resume BH thread */
+       WARN_ON(cw1200_bh_resume(priv));
+
+       /* Restore the previous PS mode */
+       if (!priv->vif->p2p && priv->join_status == CW1200_JOIN_STATUS_STA) {
+               priv->powersave_mode.mode = state->prev_ps_mode;
+               cw1200_set_pm(priv, &priv->powersave_mode);
+       }
+
+       if (state->beacon_skipping) {
+               wsm_set_beacon_wakeup_period(priv, priv->beacon_int *
+                                            priv->join_dtim_period >
+                                            MAX_BEACON_SKIP_TIME_MS ? 1 :
+                                            priv->join_dtim_period, 0);
+               state->beacon_skipping = false;
+       }
+
+       /* Resume delayed work */
+       cw1200_resume_work(priv, &priv->bss_loss_work,
+                          state->bss_loss_tmo);
+       cw1200_resume_work(priv, &priv->join_timeout,
+                          state->join_tmo);
+       cw1200_resume_work(priv, &priv->scan.probe_work,
+                          state->direct_probe);
+       cw1200_resume_work(priv, &priv->link_id_gc_work,
+                          state->link_id_gc);
+
+       /* Remove UDP port filter */
+       wsm_set_udp_port_filter(priv, &cw1200_udp_port_filter_off);
+
+       /* Remove ethernet frame type filter */
+       wsm_set_ether_type_filter(priv, &cw1200_ether_type_filter_off);
+
+       /* Unlock datapath */
+       wsm_unlock_tx(priv);
+
+       /* Unlock configuration mutex */
+       mutex_unlock(&priv->conf_mutex);
+
+       /* Free memory */
+       kfree(state);
+
+       return 0;
+}
diff --git a/drivers/net/wireless/cw1200/pm.h b/drivers/net/wireless/cw1200/pm.h
new file mode 100644 (file)
index 0000000..3ed90ff
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * Mac80211 power management interface for ST-Ericsson CW1200 mac80211 drivers
+ *
+ * Copyright (c) 2011, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef PM_H_INCLUDED
+#define PM_H_INCLUDED
+
+/* ******************************************************************** */
+/* mac80211 API                                                                */
+
+/* extern */  struct cw1200_common;
+/* private */ struct cw1200_suspend_state;
+
+struct cw1200_pm_state {
+       struct cw1200_suspend_state *suspend_state;
+       struct timer_list stay_awake;
+       struct platform_device *pm_dev;
+       spinlock_t lock; /* Protect access */
+};
+
+#ifdef CONFIG_PM
+int cw1200_pm_init(struct cw1200_pm_state *pm,
+                   struct cw1200_common *priv);
+void cw1200_pm_deinit(struct cw1200_pm_state *pm);
+int cw1200_wow_suspend(struct ieee80211_hw *hw,
+                      struct cfg80211_wowlan *wowlan);
+int cw1200_wow_resume(struct ieee80211_hw *hw);
+int cw1200_can_suspend(struct cw1200_common *priv);
+void cw1200_pm_stay_awake(struct cw1200_pm_state *pm,
+                         unsigned long tmo);
+#else
+static inline void cw1200_pm_stay_awake(struct cw1200_pm_state *pm,
+                                       unsigned long tmo) {
+}
+#endif
+#endif
diff --git a/drivers/net/wireless/cw1200/queue.c b/drivers/net/wireless/cw1200/queue.c
new file mode 100644 (file)
index 0000000..9c3925f
--- /dev/null
@@ -0,0 +1,583 @@
+/*
+ * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <net/mac80211.h>
+#include <linux/sched.h>
+#include "queue.h"
+#include "cw1200.h"
+#include "debug.h"
+
+/* private */ struct cw1200_queue_item
+{
+       struct list_head        head;
+       struct sk_buff          *skb;
+       u32                     packet_id;
+       unsigned long           queue_timestamp;
+       unsigned long           xmit_timestamp;
+       struct cw1200_txpriv    txpriv;
+       u8                      generation;
+};
+
+static inline void __cw1200_queue_lock(struct cw1200_queue *queue)
+{
+       struct cw1200_queue_stats *stats = queue->stats;
+       if (queue->tx_locked_cnt++ == 0) {
+               pr_debug("[TX] Queue %d is locked.\n",
+                        queue->queue_id);
+               ieee80211_stop_queue(stats->priv->hw, queue->queue_id);
+       }
+}
+
+static inline void __cw1200_queue_unlock(struct cw1200_queue *queue)
+{
+       struct cw1200_queue_stats *stats = queue->stats;
+       BUG_ON(!queue->tx_locked_cnt);
+       if (--queue->tx_locked_cnt == 0) {
+               pr_debug("[TX] Queue %d is unlocked.\n",
+                        queue->queue_id);
+               ieee80211_wake_queue(stats->priv->hw, queue->queue_id);
+       }
+}
+
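+/* Packet IDs pack four 8-bit fields:
+ * [31:24] queue generation, [23:16] queue ID,
+ * [15:8] item generation, [7:0] item index in the pool.
+ */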
+static inline void cw1200_queue_parse_id(u32 packet_id, u8 *queue_generation,
+                                        u8 *queue_id, u8 *item_generation,
+                                        u8 *item_id)
+{
+       *item_id                = (packet_id >>  0) & 0xFF;
+       *item_generation        = (packet_id >>  8) & 0xFF;
+       *queue_id               = (packet_id >> 16) & 0xFF;
+       *queue_generation       = (packet_id >> 24) & 0xFF;
+}
+
+static inline u32 cw1200_queue_mk_packet_id(u8 queue_generation, u8 queue_id,
+                                           u8 item_generation, u8 item_id)
+{
+       return ((u32)item_id << 0) |
+               ((u32)item_generation << 8) |
+               ((u32)queue_id << 16) |
+               ((u32)queue_generation << 24);
+}
+
+static void cw1200_queue_post_gc(struct cw1200_queue_stats *stats,
+                                struct list_head *gc_list)
+{
+       struct cw1200_queue_item *item, *tmp;
+
+       list_for_each_entry_safe(item, tmp, gc_list, head) {
+               list_del(&item->head);
+               stats->skb_dtor(stats->priv, item->skb, &item->txpriv);
+               kfree(item);
+       }
+}
+
+static void cw1200_queue_register_post_gc(struct list_head *gc_list,
+                                         struct cw1200_queue_item *item)
+{
+       struct cw1200_queue_item *gc_item;
+       gc_item = kmalloc(sizeof(struct cw1200_queue_item),
+                       GFP_ATOMIC);
+       BUG_ON(!gc_item);
+       memcpy(gc_item, item, sizeof(struct cw1200_queue_item));
+       list_add_tail(&gc_item->head, gc_list);
+}
+
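+/* Expire queued frames whose TTL has elapsed: move them to the free pool
+ * and stash copies on 'head' so their destructors can run outside the
+ * queue lock.
+ */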
+static void __cw1200_queue_gc(struct cw1200_queue *queue,
+                             struct list_head *head,
+                             bool unlock)
+{
+       struct cw1200_queue_stats *stats = queue->stats;
+       struct cw1200_queue_item *item = NULL, *tmp;
+       bool wakeup_stats = false;
+
+       list_for_each_entry_safe(item, tmp, &queue->queue, head) {
+               if (jiffies - item->queue_timestamp < queue->ttl)
+                       break;
+               --queue->num_queued;
+               --queue->link_map_cache[item->txpriv.link_id];
+               spin_lock_bh(&stats->lock);
+               --stats->num_queued;
+               if (!--stats->link_map_cache[item->txpriv.link_id])
+                       wakeup_stats = true;
+               spin_unlock_bh(&stats->lock);
+               cw1200_debug_tx_ttl(stats->priv);
+               cw1200_queue_register_post_gc(head, item);
+               item->skb = NULL;
+               list_move_tail(&item->head, &queue->free_pool);
+       }
+
+       if (wakeup_stats)
+               wake_up(&stats->wait_link_id_empty);
+
+       if (queue->overfull) {
+               if (queue->num_queued <= (queue->capacity >> 1)) {
+                       queue->overfull = false;
+                       if (unlock)
+                               __cw1200_queue_unlock(queue);
+               } else if (item) {
+                       unsigned long tmo = item->queue_timestamp + queue->ttl;
+                       mod_timer(&queue->gc, tmo);
+                       cw1200_pm_stay_awake(&stats->priv->pm_state,
+                                            tmo - jiffies);
+               }
+       }
+}
+
+static void cw1200_queue_gc(unsigned long arg)
+{
+       LIST_HEAD(list);
+       struct cw1200_queue *queue =
+               (struct cw1200_queue *)arg;
+
+       spin_lock_bh(&queue->lock);
+       __cw1200_queue_gc(queue, &list, true);
+       spin_unlock_bh(&queue->lock);
+       cw1200_queue_post_gc(queue->stats, &list);
+}
+
+int cw1200_queue_stats_init(struct cw1200_queue_stats *stats,
+                           size_t map_capacity,
+                           cw1200_queue_skb_dtor_t skb_dtor,
+                           struct cw1200_common *priv)
+{
+       memset(stats, 0, sizeof(*stats));
+       stats->map_capacity = map_capacity;
+       stats->skb_dtor = skb_dtor;
+       stats->priv = priv;
+       spin_lock_init(&stats->lock);
+       init_waitqueue_head(&stats->wait_link_id_empty);
+
+       stats->link_map_cache = kzalloc(sizeof(int) * map_capacity,
+                                       GFP_KERNEL);
+       if (!stats->link_map_cache)
+               return -ENOMEM;
+
+       return 0;
+}
+
+int cw1200_queue_init(struct cw1200_queue *queue,
+                     struct cw1200_queue_stats *stats,
+                     u8 queue_id,
+                     size_t capacity,
+                     unsigned long ttl)
+{
+       size_t i;
+
+       memset(queue, 0, sizeof(*queue));
+       queue->stats = stats;
+       queue->capacity = capacity;
+       queue->queue_id = queue_id;
+       queue->ttl = ttl;
+       INIT_LIST_HEAD(&queue->queue);
+       INIT_LIST_HEAD(&queue->pending);
+       INIT_LIST_HEAD(&queue->free_pool);
+       spin_lock_init(&queue->lock);
+       init_timer(&queue->gc);
+       queue->gc.data = (unsigned long)queue;
+       queue->gc.function = cw1200_queue_gc;
+
+       queue->pool = kzalloc(sizeof(struct cw1200_queue_item) * capacity,
+                       GFP_KERNEL);
+       if (!queue->pool)
+               return -ENOMEM;
+
+       queue->link_map_cache = kzalloc(sizeof(int) * stats->map_capacity,
+                       GFP_KERNEL);
+       if (!queue->link_map_cache) {
+               kfree(queue->pool);
+               queue->pool = NULL;
+               return -ENOMEM;
+       }
+
+       for (i = 0; i < capacity; ++i)
+               list_add_tail(&queue->pool[i].head, &queue->free_pool);
+
+       return 0;
+}
+
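+/* Drop every queued and pending frame, bump the queue generation so stale
+ * packet IDs are rejected, and run the frame destructors outside the lock.
+ */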
+int cw1200_queue_clear(struct cw1200_queue *queue)
+{
+       int i;
+       LIST_HEAD(gc_list);
+       struct cw1200_queue_stats *stats = queue->stats;
+       struct cw1200_queue_item *item, *tmp;
+
+       spin_lock_bh(&queue->lock);
+       queue->generation++;
+       list_splice_tail_init(&queue->queue, &queue->pending);
+       list_for_each_entry_safe(item, tmp, &queue->pending, head) {
+               WARN_ON(!item->skb);
+               cw1200_queue_register_post_gc(&gc_list, item);
+               item->skb = NULL;
+               list_move_tail(&item->head, &queue->free_pool);
+       }
+       queue->num_queued = 0;
+       queue->num_pending = 0;
+
+       spin_lock_bh(&stats->lock);
+       for (i = 0; i < stats->map_capacity; ++i) {
+               stats->num_queued -= queue->link_map_cache[i];
+               stats->link_map_cache[i] -= queue->link_map_cache[i];
+               queue->link_map_cache[i] = 0;
+       }
+       spin_unlock_bh(&stats->lock);
+       if (queue->overfull) {
+               queue->overfull = false;
+               __cw1200_queue_unlock(queue);
+       }
+       spin_unlock_bh(&queue->lock);
+       wake_up(&stats->wait_link_id_empty);
+       cw1200_queue_post_gc(stats, &gc_list);
+       return 0;
+}
+
+void cw1200_queue_stats_deinit(struct cw1200_queue_stats *stats)
+{
+       kfree(stats->link_map_cache);
+       stats->link_map_cache = NULL;
+}
+
+void cw1200_queue_deinit(struct cw1200_queue *queue)
+{
+       cw1200_queue_clear(queue);
+       del_timer_sync(&queue->gc);
+       INIT_LIST_HEAD(&queue->free_pool);
+       kfree(queue->pool);
+       kfree(queue->link_map_cache);
+       queue->pool = NULL;
+       queue->link_map_cache = NULL;
+       queue->capacity = 0;
+}
+
+size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue,
+                                  u32 link_id_map)
+{
+       size_t ret;
+       int i, bit;
+       size_t map_capacity = queue->stats->map_capacity;
+
+       if (!link_id_map)
+               return 0;
+
+       spin_lock_bh(&queue->lock);
+       if (link_id_map == (u32)-1) {
+               ret = queue->num_queued - queue->num_pending;
+       } else {
+               ret = 0;
+               for (i = 0, bit = 1; i < map_capacity; ++i, bit <<= 1) {
+                       if (link_id_map & bit)
+                               ret += queue->link_map_cache[i];
+               }
+       }
+       spin_unlock_bh(&queue->lock);
+       return ret;
+}
+
+int cw1200_queue_put(struct cw1200_queue *queue,
+                    struct sk_buff *skb,
+                    struct cw1200_txpriv *txpriv)
+{
+       int ret = 0;
+       LIST_HEAD(gc_list);
+       struct cw1200_queue_stats *stats = queue->stats;
+
+       if (txpriv->link_id >= queue->stats->map_capacity)
+               return -EINVAL;
+
+       spin_lock_bh(&queue->lock);
+       if (!WARN_ON(list_empty(&queue->free_pool))) {
+               struct cw1200_queue_item *item = list_first_entry(
+                       &queue->free_pool, struct cw1200_queue_item, head);
+               BUG_ON(item->skb);
+
+               list_move_tail(&item->head, &queue->queue);
+               item->skb = skb;
+               item->txpriv = *txpriv;
+               item->generation = 0;
+               item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
+                                                           queue->queue_id,
+                                                           item->generation,
+                                                           item - queue->pool);
+               item->queue_timestamp = jiffies;
+
+               ++queue->num_queued;
+               ++queue->link_map_cache[txpriv->link_id];
+
+               spin_lock_bh(&stats->lock);
+               ++stats->num_queued;
+               ++stats->link_map_cache[txpriv->link_id];
+               spin_unlock_bh(&stats->lock);
+
+               /* TX may happen in parallel sometimes.
+                * Leave extra queue slots so we don't overflow.
+                */
+               if (!queue->overfull &&
+                   queue->num_queued >=
+                   (queue->capacity - (num_present_cpus() - 1))) {
+                       queue->overfull = true;
+                       __cw1200_queue_lock(queue);
+                       mod_timer(&queue->gc, jiffies);
+               }
+       } else {
+               ret = -ENOENT;
+       }
+       spin_unlock_bh(&queue->lock);
+       return ret;
+}
+
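+/* Pick the first queued frame whose link ID is in link_id_map and move it
+ * to the pending list; a pending frame is later completed with
+ * cw1200_queue_remove() or put back with cw1200_queue_requeue().
+ */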
+int cw1200_queue_get(struct cw1200_queue *queue,
+                    u32 link_id_map,
+                    struct wsm_tx **tx,
+                    struct ieee80211_tx_info **tx_info,
+                    const struct cw1200_txpriv **txpriv)
+{
+       int ret = -ENOENT;
+       struct cw1200_queue_item *item;
+       struct cw1200_queue_stats *stats = queue->stats;
+       bool wakeup_stats = false;
+
+       spin_lock_bh(&queue->lock);
+       list_for_each_entry(item, &queue->queue, head) {
+               if (link_id_map & BIT(item->txpriv.link_id)) {
+                       ret = 0;
+                       break;
+               }
+       }
+
+       if (!WARN_ON(ret)) {
+               *tx = (struct wsm_tx *)item->skb->data;
+               *tx_info = IEEE80211_SKB_CB(item->skb);
+               *txpriv = &item->txpriv;
+               (*tx)->packet_id = item->packet_id;
+               list_move_tail(&item->head, &queue->pending);
+               ++queue->num_pending;
+               --queue->link_map_cache[item->txpriv.link_id];
+               item->xmit_timestamp = jiffies;
+
+               spin_lock_bh(&stats->lock);
+               --stats->num_queued;
+               if (!--stats->link_map_cache[item->txpriv.link_id])
+                       wakeup_stats = true;
+               spin_unlock_bh(&stats->lock);
+       }
+       spin_unlock_bh(&queue->lock);
+       if (wakeup_stats)
+               wake_up(&stats->wait_link_id_empty);
+       return ret;
+}
+
+int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packet_id)
+{
+       int ret = 0;
+       u8 queue_generation, queue_id, item_generation, item_id;
+       struct cw1200_queue_item *item;
+       struct cw1200_queue_stats *stats = queue->stats;
+
+       cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
+                             &item_generation, &item_id);
+
+       item = &queue->pool[item_id];
+
+       spin_lock_bh(&queue->lock);
+       BUG_ON(queue_id != queue->queue_id);
+       if (queue_generation != queue->generation) {
+               ret = -ENOENT;
+       } else if (item_id >= (unsigned) queue->capacity) {
+               WARN_ON(1);
+               ret = -EINVAL;
+       } else if (item->generation != item_generation) {
+               WARN_ON(1);
+               ret = -ENOENT;
+       } else {
+               --queue->num_pending;
+               ++queue->link_map_cache[item->txpriv.link_id];
+
+               spin_lock_bh(&stats->lock);
+               ++stats->num_queued;
+               ++stats->link_map_cache[item->txpriv.link_id];
+               spin_unlock_bh(&stats->lock);
+
+               item->generation = ++item_generation;
+               item->packet_id = cw1200_queue_mk_packet_id(queue_generation,
+                                                           queue_id,
+                                                           item_generation,
+                                                           item_id);
+               list_move(&item->head, &queue->queue);
+       }
+       spin_unlock_bh(&queue->lock);
+       return ret;
+}
+
+int cw1200_queue_requeue_all(struct cw1200_queue *queue)
+{
+       struct cw1200_queue_item *item, *tmp;
+       struct cw1200_queue_stats *stats = queue->stats;
+       spin_lock_bh(&queue->lock);
+
+       list_for_each_entry_safe_reverse(item, tmp, &queue->pending, head) {
+               --queue->num_pending;
+               ++queue->link_map_cache[item->txpriv.link_id];
+
+               spin_lock_bh(&stats->lock);
+               ++stats->num_queued;
+               ++stats->link_map_cache[item->txpriv.link_id];
+               spin_unlock_bh(&stats->lock);
+
+               ++item->generation;
+               item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
+                                                           queue->queue_id,
+                                                           item->generation,
+                                                           item - queue->pool);
+               list_move(&item->head, &queue->queue);
+       }
+       spin_unlock_bh(&queue->lock);
+
+       return 0;
+}
+
+int cw1200_queue_remove(struct cw1200_queue *queue, u32 packet_id)
+{
+       int ret = 0;
+       u8 queue_generation, queue_id, item_generation, item_id;
+       struct cw1200_queue_item *item;
+       struct cw1200_queue_stats *stats = queue->stats;
+       struct sk_buff *gc_skb = NULL;
+       struct cw1200_txpriv gc_txpriv;
+
+       cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
+                             &item_generation, &item_id);
+
+       item = &queue->pool[item_id];
+
+       spin_lock_bh(&queue->lock);
+       BUG_ON(queue_id != queue->queue_id);
+       if (queue_generation != queue->generation) {
+               ret = -ENOENT;
+       } else if (item_id >= (unsigned) queue->capacity) {
+               WARN_ON(1);
+               ret = -EINVAL;
+       } else if (item->generation != item_generation) {
+               WARN_ON(1);
+               ret = -ENOENT;
+       } else {
+               gc_txpriv = item->txpriv;
+               gc_skb = item->skb;
+               item->skb = NULL;
+               --queue->num_pending;
+               --queue->num_queued;
+               ++queue->num_sent;
+               ++item->generation;
+               /* Do not use list_move_tail here, but list_move: the freed
+                * item sits at the head of the free pool and is reused
+                * first, which helps cache locality.
+                */
+               list_move(&item->head, &queue->free_pool);
+
+               if (queue->overfull &&
+                   (queue->num_queued <= (queue->capacity >> 1))) {
+                       queue->overfull = false;
+                       __cw1200_queue_unlock(queue);
+               }
+       }
+       spin_unlock_bh(&queue->lock);
+
+       if (gc_skb)
+               stats->skb_dtor(stats->priv, gc_skb, &gc_txpriv);
+
+       return ret;
+}
+
+int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packet_id,
+                        struct sk_buff **skb,
+                        const struct cw1200_txpriv **txpriv)
+{
+       int ret = 0;
+       u8 queue_generation, queue_id, item_generation, item_id;
+       struct cw1200_queue_item *item;
+       cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
+                             &item_generation, &item_id);
+
+       item = &queue->pool[item_id];
+
+       spin_lock_bh(&queue->lock);
+       BUG_ON(queue_id != queue->queue_id);
+       if (queue_generation != queue->generation) {
+               ret = -ENOENT;
+       } else if (item_id >= (unsigned) queue->capacity) {
+               WARN_ON(1);
+               ret = -EINVAL;
+       } else if (item->generation != item_generation) {
+               WARN_ON(1);
+               ret = -ENOENT;
+       } else {
+               *skb = item->skb;
+               *txpriv = &item->txpriv;
+       }
+       spin_unlock_bh(&queue->lock);
+       return ret;
+}
+
+void cw1200_queue_lock(struct cw1200_queue *queue)
+{
+       spin_lock_bh(&queue->lock);
+       __cw1200_queue_lock(queue);
+       spin_unlock_bh(&queue->lock);
+}
+
+void cw1200_queue_unlock(struct cw1200_queue *queue)
+{
+       spin_lock_bh(&queue->lock);
+       __cw1200_queue_unlock(queue);
+       spin_unlock_bh(&queue->lock);
+}
+
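+/* Lower *timestamp to the oldest xmit timestamp among pending frames,
+ * skipping pending_frame_id; returns true if any frames are pending.
+ */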
+bool cw1200_queue_get_xmit_timestamp(struct cw1200_queue *queue,
+                                    unsigned long *timestamp,
+                                    u32 pending_frame_id)
+{
+       struct cw1200_queue_item *item;
+       bool ret;
+
+       spin_lock_bh(&queue->lock);
+       ret = !list_empty(&queue->pending);
+       if (ret) {
+               list_for_each_entry(item, &queue->pending, head) {
+                       if (item->packet_id != pending_frame_id)
+                               if (time_before(item->xmit_timestamp,
+                                               *timestamp))
+                                       *timestamp = item->xmit_timestamp;
+               }
+       }
+       spin_unlock_bh(&queue->lock);
+       return ret;
+}
+
+bool cw1200_queue_stats_is_empty(struct cw1200_queue_stats *stats,
+                                u32 link_id_map)
+{
+       bool empty = true;
+
+       spin_lock_bh(&stats->lock);
+       if (link_id_map == (u32)-1) {
+               empty = stats->num_queued == 0;
+       } else {
+               int i;
+               for (i = 0; i < stats->map_capacity; ++i) {
+                       if (link_id_map & BIT(i)) {
+                               if (stats->link_map_cache[i]) {
+                                       empty = false;
+                                       break;
+                               }
+                       }
+               }
+       }
+       spin_unlock_bh(&stats->lock);
+
+       return empty;
+}
diff --git a/drivers/net/wireless/cw1200/queue.h b/drivers/net/wireless/cw1200/queue.h
new file mode 100644 (file)
index 0000000..119f9c7
--- /dev/null
@@ -0,0 +1,116 @@
+/*
+ * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef CW1200_QUEUE_H_INCLUDED
+#define CW1200_QUEUE_H_INCLUDED
+
+/* private */ struct cw1200_queue_item;
+
+/* extern */ struct sk_buff;
+/* extern */ struct wsm_tx;
+/* extern */ struct cw1200_common;
+/* extern */ struct ieee80211_tx_queue_stats;
+/* extern */ struct cw1200_txpriv;
+
+/* forward */ struct cw1200_queue_stats;
+
+typedef void (*cw1200_queue_skb_dtor_t)(struct cw1200_common *priv,
+                                       struct sk_buff *skb,
+                                       const struct cw1200_txpriv *txpriv);
+
+struct cw1200_queue {
+       struct cw1200_queue_stats *stats;
+       size_t                  capacity;
+       size_t                  num_queued;
+       size_t                  num_pending;
+       size_t                  num_sent;
+       struct cw1200_queue_item *pool;
+       struct list_head        queue;
+       struct list_head        free_pool;
+       struct list_head        pending;
+       int                     tx_locked_cnt;
+       int                     *link_map_cache;
+       bool                    overfull;
+       spinlock_t              lock; /* Protect queue entry */
+       u8                      queue_id;
+       u8                      generation;
+       struct timer_list       gc;
+       unsigned long           ttl;
+};
+
+struct cw1200_queue_stats {
+       spinlock_t              lock; /* Protect stats entry */
+       int                     *link_map_cache;
+       int                     num_queued;
+       size_t                  map_capacity;
+       wait_queue_head_t       wait_link_id_empty;
+       cw1200_queue_skb_dtor_t skb_dtor;
+       struct cw1200_common    *priv;
+};
+
+struct cw1200_txpriv {
+       u8 link_id;
+       u8 raw_link_id;
+       u8 tid;
+       u8 rate_id;
+       u8 offset;
+};
+
+int cw1200_queue_stats_init(struct cw1200_queue_stats *stats,
+                           size_t map_capacity,
+                           cw1200_queue_skb_dtor_t skb_dtor,
+                           struct cw1200_common *priv);
+int cw1200_queue_init(struct cw1200_queue *queue,
+                     struct cw1200_queue_stats *stats,
+                     u8 queue_id,
+                     size_t capacity,
+                     unsigned long ttl);
+int cw1200_queue_clear(struct cw1200_queue *queue);
+void cw1200_queue_stats_deinit(struct cw1200_queue_stats *stats);
+void cw1200_queue_deinit(struct cw1200_queue *queue);
+
+size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue,
+                                  u32 link_id_map);
+int cw1200_queue_put(struct cw1200_queue *queue,
+                    struct sk_buff *skb,
+                    struct cw1200_txpriv *txpriv);
+int cw1200_queue_get(struct cw1200_queue *queue,
+                    u32 link_id_map,
+                    struct wsm_tx **tx,
+                    struct ieee80211_tx_info **tx_info,
+                    const struct cw1200_txpriv **txpriv);
+int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packet_id);
+int cw1200_queue_requeue_all(struct cw1200_queue *queue);
+int cw1200_queue_remove(struct cw1200_queue *queue,
+                       u32 packet_id);
+int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packet_id,
+                        struct sk_buff **skb,
+                        const struct cw1200_txpriv **txpriv);
+void cw1200_queue_lock(struct cw1200_queue *queue);
+void cw1200_queue_unlock(struct cw1200_queue *queue);
+bool cw1200_queue_get_xmit_timestamp(struct cw1200_queue *queue,
+                                    unsigned long *timestamp,
+                                    u32 pending_frame_id);
+
+bool cw1200_queue_stats_is_empty(struct cw1200_queue_stats *stats,
+                                u32 link_id_map);
+
+static inline u8 cw1200_queue_get_queue_id(u32 packet_id)
+{
+       return (packet_id >> 16) & 0xFF;
+}
+
+static inline u8 cw1200_queue_get_generation(u32 packet_id)
+{
+       return (packet_id >>  8) & 0xFF;
+}
+
+#endif /* CW1200_QUEUE_H_INCLUDED */
diff --git a/drivers/net/wireless/cw1200/scan.c b/drivers/net/wireless/cw1200/scan.c
new file mode 100644 (file)
index 0000000..ee3c190
--- /dev/null
@@ -0,0 +1,461 @@
+/*
+ * Scan implementation for ST-Ericsson CW1200 mac80211 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/sched.h>
+#include "cw1200.h"
+#include "scan.h"
+#include "sta.h"
+#include "pm.h"
+
+static void cw1200_scan_restart_delayed(struct cw1200_common *priv);
+
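+/* Issue a WSM scan request: compute a timeout from the per-channel dwell
+ * times, keep the device awake and arm the scan timeout work for that long,
+ * then submit the request via wsm_scan().
+ */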
+static int cw1200_scan_start(struct cw1200_common *priv, struct wsm_scan *scan)
+{
+       int ret, i;
+       int tmo = 2000;
+
+       switch (priv->join_status) {
+       case CW1200_JOIN_STATUS_PRE_STA:
+       case CW1200_JOIN_STATUS_JOINING:
+               return -EBUSY;
+       default:
+               break;
+       }
+
+       wiphy_dbg(priv->hw->wiphy, "[SCAN] hw req, type %d, %d channels, flags: 0x%x.\n",
+                 scan->type, scan->num_channels, scan->flags);
+
+       for (i = 0; i < scan->num_channels; ++i)
+               tmo += scan->ch[i].max_chan_time + 10;
+
+       cancel_delayed_work_sync(&priv->clear_recent_scan_work);
+       atomic_set(&priv->scan.in_progress, 1);
+       atomic_set(&priv->recent_scan, 1);
+       cw1200_pm_stay_awake(&priv->pm_state, tmo * HZ / 1000);
+       queue_delayed_work(priv->workqueue, &priv->scan.timeout,
+                          tmo * HZ / 1000);
+       ret = wsm_scan(priv, scan);
+       if (ret) {
+               atomic_set(&priv->scan.in_progress, 0);
+               cancel_delayed_work_sync(&priv->scan.timeout);
+               cw1200_scan_restart_delayed(priv);
+       }
+       return ret;
+}
+
+int cw1200_hw_scan(struct ieee80211_hw *hw,
+                  struct ieee80211_vif *vif,
+                  struct cfg80211_scan_request *req)
+{
+       struct cw1200_common *priv = hw->priv;
+       struct wsm_template_frame frame = {
+               .frame_type = WSM_FRAME_TYPE_PROBE_REQUEST,
+       };
+       int i, ret;
+
+       if (!priv->vif)
+               return -EINVAL;
+
+       /* Scanning while P2P_GO is active corrupts the firmware MiniAP mode */
+       if (priv->join_status == CW1200_JOIN_STATUS_AP)
+               return -EOPNOTSUPP;
+
+       if (req->n_ssids == 1 && !req->ssids[0].ssid_len)
+               req->n_ssids = 0;
+
+       wiphy_dbg(hw->wiphy, "[SCAN] Scan request for %d SSIDs.\n",
+                 req->n_ssids);
+
+       if (req->n_ssids > WSM_SCAN_MAX_NUM_OF_SSIDS)
+               return -EINVAL;
+
+       frame.skb = ieee80211_probereq_get(hw, priv->vif, NULL, 0,
+               req->ie_len);
+       if (!frame.skb)
+               return -ENOMEM;
+
+       if (req->ie_len)
+               memcpy(skb_put(frame.skb, req->ie_len), req->ie, req->ie_len);
+
+       /* will be unlocked in cw1200_scan_work() */
+       down(&priv->scan.lock);
+       mutex_lock(&priv->conf_mutex);
+
+       ret = wsm_set_template_frame(priv, &frame);
+       if (!ret) {
+               /* Host wants to be the probe responder. */
+               ret = wsm_set_probe_responder(priv, true);
+       }
+       if (ret) {
+               mutex_unlock(&priv->conf_mutex);
+               up(&priv->scan.lock);
+               dev_kfree_skb(frame.skb);
+               return ret;
+       }
+
+       wsm_lock_tx(priv);
+
+       BUG_ON(priv->scan.req);
+       priv->scan.req = req;
+       priv->scan.n_ssids = 0;
+       priv->scan.status = 0;
+       priv->scan.begin = &req->channels[0];
+       priv->scan.curr = priv->scan.begin;
+       priv->scan.end = &req->channels[req->n_channels];
+       priv->scan.output_power = priv->output_power;
+
+       for (i = 0; i < req->n_ssids; ++i) {
+               struct wsm_ssid *dst = &priv->scan.ssids[priv->scan.n_ssids];
+               memcpy(&dst->ssid[0], req->ssids[i].ssid, sizeof(dst->ssid));
+               dst->length = req->ssids[i].ssid_len;
+               ++priv->scan.n_ssids;
+       }
+
+       mutex_unlock(&priv->conf_mutex);
+
+       if (frame.skb)
+               dev_kfree_skb(frame.skb);
+       queue_work(priv->workqueue, &priv->scan.work);
+       return 0;
+}
+
+void cw1200_scan_work(struct work_struct *work)
+{
+       struct cw1200_common *priv = container_of(work, struct cw1200_common,
+                                                       scan.work);
+       struct ieee80211_channel **it;
+       struct wsm_scan scan = {
+               .type = WSM_SCAN_TYPE_FOREGROUND,
+               .flags = WSM_SCAN_FLAG_SPLIT_METHOD,
+       };
+       bool first_run = (priv->scan.begin == priv->scan.curr &&
+                         priv->scan.begin != priv->scan.end);
+       int i;
+
+       if (first_run) {
+               /* Firmware gets crazy if scan request is sent
+                * when STA is joined but not yet associated.
+                * Force unjoin in this case.
+                */
+               if (cancel_delayed_work_sync(&priv->join_timeout) > 0)
+                       cw1200_join_timeout(&priv->join_timeout.work);
+       }
+
+       mutex_lock(&priv->conf_mutex);
+
+       if (first_run) {
+               if (priv->join_status == CW1200_JOIN_STATUS_STA &&
+                   !(priv->powersave_mode.mode & WSM_PSM_PS)) {
+                       struct wsm_set_pm pm = priv->powersave_mode;
+                       pm.mode = WSM_PSM_PS;
+                       cw1200_set_pm(priv, &pm);
+               } else if (priv->join_status == CW1200_JOIN_STATUS_MONITOR) {
+                       /* FW bug: driver has to restart p2p-dev mode
+                        * after scan
+                        */
+                       cw1200_disable_listening(priv);
+               }
+       }
+
+       if (!priv->scan.req || (priv->scan.curr == priv->scan.end)) {
+               if (priv->scan.output_power != priv->output_power)
+                       wsm_set_output_power(priv, priv->output_power * 10);
+               if (priv->join_status == CW1200_JOIN_STATUS_STA &&
+                   !(priv->powersave_mode.mode & WSM_PSM_PS))
+                       cw1200_set_pm(priv, &priv->powersave_mode);
+
+               if (priv->scan.status < 0)
+                       wiphy_dbg(priv->hw->wiphy, "[SCAN] Scan failed (%d).\n",
+                                 priv->scan.status);
+               else if (priv->scan.req)
+                       wiphy_dbg(priv->hw->wiphy,
+                                 "[SCAN] Scan completed.\n");
+               else
+                       wiphy_dbg(priv->hw->wiphy,
+                                 "[SCAN] Scan canceled.\n");
+
+               priv->scan.req = NULL;
+               cw1200_scan_restart_delayed(priv);
+               wsm_unlock_tx(priv);
+               mutex_unlock(&priv->conf_mutex);
+               ieee80211_scan_completed(priv->hw, priv->scan.status ? 1 : 0);
+               up(&priv->scan.lock);
+               return;
+       } else {
+               struct ieee80211_channel *first = *priv->scan.curr;
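+               /* Batch consecutive channels that share the same band and
+                * passive/active mode (and, for active scan, the same max
+                * power) into a single WSM scan request.
+                */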
+               for (it = priv->scan.curr + 1, i = 1;
+                    it != priv->scan.end && i < WSM_SCAN_MAX_NUM_OF_CHANNELS;
+                    ++it, ++i) {
+                       if ((*it)->band != first->band)
+                               break;
+                       if (((*it)->flags ^ first->flags) &
+                                       IEEE80211_CHAN_PASSIVE_SCAN)
+                               break;
+                       if (!(first->flags & IEEE80211_CHAN_PASSIVE_SCAN) &&
+                           (*it)->max_power != first->max_power)
+                               break;
+               }
+               scan.band = first->band;
+
+               if (priv->scan.req->no_cck)
+                       scan.max_tx_rate = WSM_TRANSMIT_RATE_6;
+               else
+                       scan.max_tx_rate = WSM_TRANSMIT_RATE_1;
+               scan.num_probes =
+                       (first->flags & IEEE80211_CHAN_PASSIVE_SCAN) ? 0 : 2;
+               scan.num_ssids = priv->scan.n_ssids;
+               scan.ssids = &priv->scan.ssids[0];
+               scan.num_channels = it - priv->scan.curr;
+               /* TODO: Is it optimal? */
+               scan.probe_delay = 100;
+               /* It is not stated in the WSM specification, but the
+                * FW team says the driver may not use FG scan
+                * when joined.
+                */
+               if (priv->join_status == CW1200_JOIN_STATUS_STA) {
+                       scan.type = WSM_SCAN_TYPE_BACKGROUND;
+                       scan.flags = WSM_SCAN_FLAG_FORCE_BACKGROUND;
+               }
+               scan.ch = kzalloc(
+                       sizeof(struct wsm_scan_ch) * (it - priv->scan.curr),
+                       GFP_KERNEL);
+               if (!scan.ch) {
+                       priv->scan.status = -ENOMEM;
+                       goto fail;
+               }
+               for (i = 0; i < scan.num_channels; ++i) {
+                       scan.ch[i].number = priv->scan.curr[i]->hw_value;
+                       if (priv->scan.curr[i]->flags & IEEE80211_CHAN_PASSIVE_SCAN) {
+                               scan.ch[i].min_chan_time = 50;
+                               scan.ch[i].max_chan_time = 100;
+                       } else {
+                               scan.ch[i].min_chan_time = 10;
+                               scan.ch[i].max_chan_time = 25;
+                       }
+               }
+               if (!(first->flags & IEEE80211_CHAN_PASSIVE_SCAN) &&
+                   priv->scan.output_power != first->max_power) {
+                       priv->scan.output_power = first->max_power;
+                       wsm_set_output_power(priv,
+                                            priv->scan.output_power * 10);
+               }
+               priv->scan.status = cw1200_scan_start(priv, &scan);
+               kfree(scan.ch);
+               if (priv->scan.status)
+                       goto fail;
+               priv->scan.curr = it;
+       }
+       mutex_unlock(&priv->conf_mutex);
+       return;
+
+fail:
+       priv->scan.curr = priv->scan.end;
+       mutex_unlock(&priv->conf_mutex);
+       queue_work(priv->workqueue, &priv->scan.work);
+       return;
+}
+
+static void cw1200_scan_restart_delayed(struct cw1200_common *priv)
+{
+       /* FW bug: driver has to restart p2p-dev mode after scan. */
+       if (priv->join_status == CW1200_JOIN_STATUS_MONITOR) {
+               cw1200_enable_listening(priv);
+               cw1200_update_filtering(priv);
+       }
+
+       if (priv->delayed_unjoin) {
+               priv->delayed_unjoin = false;
+               if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0)
+                       wsm_unlock_tx(priv);
+       } else if (priv->delayed_link_loss) {
+               wiphy_dbg(priv->hw->wiphy, "[CQM] Requeue BSS loss.\n");
+               priv->delayed_link_loss = 0;
+               cw1200_cqm_bssloss_sm(priv, 1, 0, 0);
+       }
+}
+
+static void cw1200_scan_complete(struct cw1200_common *priv)
+{
+       queue_delayed_work(priv->workqueue, &priv->clear_recent_scan_work, HZ);
+       if (priv->scan.direct_probe) {
+               wiphy_dbg(priv->hw->wiphy, "[SCAN] Direct probe complete.\n");
+               cw1200_scan_restart_delayed(priv);
+               priv->scan.direct_probe = 0;
+               up(&priv->scan.lock);
+               wsm_unlock_tx(priv);
+       } else {
+               cw1200_scan_work(&priv->scan.work);
+       }
+}
+
+void cw1200_scan_failed_cb(struct cw1200_common *priv)
+{
+       if (priv->mode == NL80211_IFTYPE_UNSPECIFIED)
+               /* STA is stopped. */
+               return;
+
+       if (cancel_delayed_work_sync(&priv->scan.timeout) > 0) {
+               priv->scan.status = -EIO;
+               queue_delayed_work(priv->workqueue, &priv->scan.timeout, 0);
+       }
+}
+
+void cw1200_scan_complete_cb(struct cw1200_common *priv,
+                               struct wsm_scan_complete *arg)
+{
+       if (priv->mode == NL80211_IFTYPE_UNSPECIFIED)
+               /* STA is stopped. */
+               return;
+
+       if (cancel_delayed_work_sync(&priv->scan.timeout) > 0) {
+               priv->scan.status = 1;
+               queue_delayed_work(priv->workqueue, &priv->scan.timeout, 0);
+       }
+}
+
+void cw1200_clear_recent_scan_work(struct work_struct *work)
+{
+       struct cw1200_common *priv =
+               container_of(work, struct cw1200_common,
+                            clear_recent_scan_work.work);
+       atomic_xchg(&priv->recent_scan, 0);
+}
+
+void cw1200_scan_timeout(struct work_struct *work)
+{
+       struct cw1200_common *priv =
+               container_of(work, struct cw1200_common, scan.timeout.work);
+       if (atomic_xchg(&priv->scan.in_progress, 0)) {
+               if (priv->scan.status > 0) {
+                       priv->scan.status = 0;
+               } else if (!priv->scan.status) {
+                       wiphy_warn(priv->hw->wiphy,
+                                  "Timeout waiting for scan complete notification.\n");
+                       priv->scan.status = -ETIMEDOUT;
+                       priv->scan.curr = priv->scan.end;
+                       wsm_stop_scan(priv);
+               }
+               cw1200_scan_complete(priv);
+       }
+}
+
+void cw1200_probe_work(struct work_struct *work)
+{
+       struct cw1200_common *priv =
+               container_of(work, struct cw1200_common, scan.probe_work.work);
+       u8 queue_id = cw1200_queue_get_queue_id(priv->pending_frame_id);
+       struct cw1200_queue *queue = &priv->tx_queue[queue_id];
+       const struct cw1200_txpriv *txpriv;
+       struct wsm_tx *wsm;
+       struct wsm_template_frame frame = {
+               .frame_type = WSM_FRAME_TYPE_PROBE_REQUEST,
+       };
+       struct wsm_ssid ssids[1] = {{
+               .length = 0,
+       } };
+       struct wsm_scan_ch ch[1] = {{
+               .min_chan_time = 0,
+               .max_chan_time = 10,
+       } };
+       struct wsm_scan scan = {
+               .type = WSM_SCAN_TYPE_FOREGROUND,
+               .num_probes = 1,
+               .probe_delay = 0,
+               .num_channels = 1,
+               .ssids = ssids,
+               .ch = ch,
+       };
+       u8 *ies;
+       size_t ies_len;
+       int ret;
+
+       wiphy_dbg(priv->hw->wiphy, "[SCAN] Direct probe work.\n");
+
+       mutex_lock(&priv->conf_mutex);
+       if (down_trylock(&priv->scan.lock)) {
+               /* Scan is already in progress. Requeue self. */
+               schedule();
+               queue_delayed_work(priv->workqueue,
+                                  &priv->scan.probe_work, HZ / 10);
+               mutex_unlock(&priv->conf_mutex);
+               return;
+       }
+
+       /* Make sure we still have a pending probe req */
+       if (cw1200_queue_get_skb(queue, priv->pending_frame_id,
+                                &frame.skb, &txpriv)) {
+               up(&priv->scan.lock);
+               mutex_unlock(&priv->conf_mutex);
+               wsm_unlock_tx(priv);
+               return;
+       }
+       wsm = (struct wsm_tx *)frame.skb->data;
+       scan.max_tx_rate = wsm->max_tx_rate;
+       scan.band = (priv->channel->band == IEEE80211_BAND_5GHZ) ?
+               WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G;
+       if (priv->join_status == CW1200_JOIN_STATUS_STA ||
+           priv->join_status == CW1200_JOIN_STATUS_IBSS) {
+               scan.type = WSM_SCAN_TYPE_BACKGROUND;
+               scan.flags = WSM_SCAN_FLAG_FORCE_BACKGROUND;
+       }
+       ch[0].number = priv->channel->hw_value;
+
+       skb_pull(frame.skb, txpriv->offset);
+
+       ies = &frame.skb->data[sizeof(struct ieee80211_hdr_3addr)];
+       ies_len = frame.skb->len - sizeof(struct ieee80211_hdr_3addr);
+
+       if (ies_len) {
+               u8 *ssidie =
+                       (u8 *)cfg80211_find_ie(WLAN_EID_SSID, ies, ies_len);
+               if (ssidie && ssidie[1] && ssidie[1] <= sizeof(ssids[0].ssid)) {
+                       u8 *nextie = &ssidie[2 + ssidie[1]];
+                       /* Remove SSID from the IE list. It has to be provided
+                        * as a separate argument in the cw1200_scan_start() call
+                        */
+
+                       /* Store SSID locally */
+                       ssids[0].length = ssidie[1];
+                       memcpy(ssids[0].ssid, &ssidie[2], ssids[0].length);
+                       scan.num_ssids = 1;
+
+                       /* Remove SSID from IE list */
+                       ssidie[1] = 0;
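+                       /* Shift the remaining IEs down over the removed SSID
+                        * payload and shrink the frame by the same amount.
+                        */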
+                       memmove(&ssidie[2], nextie, &ies[ies_len] - nextie);
+                       skb_trim(frame.skb, frame.skb->len - ssids[0].length);
+               }
+       }
+
+       /* FW bug: driver has to restart p2p-dev mode after scan */
+       if (priv->join_status == CW1200_JOIN_STATUS_MONITOR)
+               cw1200_disable_listening(priv);
+       ret = wsm_set_template_frame(priv, &frame);
+       priv->scan.direct_probe = 1;
+       if (!ret) {
+               wsm_flush_tx(priv);
+               ret = cw1200_scan_start(priv, &scan);
+       }
+       mutex_unlock(&priv->conf_mutex);
+
+       skb_push(frame.skb, txpriv->offset);
+       if (!ret)
+               IEEE80211_SKB_CB(frame.skb)->flags |= IEEE80211_TX_STAT_ACK;
+       BUG_ON(cw1200_queue_remove(queue, priv->pending_frame_id));
+
+       if (ret) {
+               priv->scan.direct_probe = 0;
+               up(&priv->scan.lock);
+               wsm_unlock_tx(priv);
+       }
+
+       return;
+}
diff --git a/drivers/net/wireless/cw1200/scan.h b/drivers/net/wireless/cw1200/scan.h
new file mode 100644 (file)
index 0000000..5a8296c
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ * Scan interface for ST-Ericsson CW1200 mac80211 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef SCAN_H_INCLUDED
+#define SCAN_H_INCLUDED
+
+#include <linux/semaphore.h>
+#include "wsm.h"
+
+/* external */ struct sk_buff;
+/* external */ struct cfg80211_scan_request;
+/* external */ struct ieee80211_channel;
+/* external */ struct ieee80211_hw;
+/* external */ struct work_struct;
+
+struct cw1200_scan {
+       struct semaphore lock;
+       struct work_struct work;
+       struct delayed_work timeout;
+       struct cfg80211_scan_request *req;
+       struct ieee80211_channel **begin;
+       struct ieee80211_channel **curr;
+       struct ieee80211_channel **end;
+       struct wsm_ssid ssids[WSM_SCAN_MAX_NUM_OF_SSIDS];
+       int output_power;
+       int n_ssids;
+       int status;
+       atomic_t in_progress;
+       /* Direct probe requests workaround */
+       struct delayed_work probe_work;
+       int direct_probe;
+};
+
+int cw1200_hw_scan(struct ieee80211_hw *hw,
+                  struct ieee80211_vif *vif,
+                  struct cfg80211_scan_request *req);
+void cw1200_scan_work(struct work_struct *work);
+void cw1200_scan_timeout(struct work_struct *work);
+void cw1200_clear_recent_scan_work(struct work_struct *work);
+void cw1200_scan_complete_cb(struct cw1200_common *priv,
+                            struct wsm_scan_complete *arg);
+void cw1200_scan_failed_cb(struct cw1200_common *priv);
+
+/* ******************************************************************** */
+/* Raw probe requests TX workaround                                    */
+void cw1200_probe_work(struct work_struct *work);
+
+#endif
diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c
new file mode 100644 (file)
index 0000000..7365674
--- /dev/null
@@ -0,0 +1,2403 @@
+/*
+ * Mac80211 STA API for ST-Ericsson CW1200 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/sched.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+
+#include "cw1200.h"
+#include "sta.h"
+#include "fwio.h"
+#include "bh.h"
+#include "debug.h"
+
+#ifndef ERP_INFO_BYTE_OFFSET
+#define ERP_INFO_BYTE_OFFSET 2
+#endif
+
+static void cw1200_do_join(struct cw1200_common *priv);
+static void cw1200_do_unjoin(struct cw1200_common *priv);
+
+static int cw1200_upload_beacon(struct cw1200_common *priv);
+static int cw1200_upload_pspoll(struct cw1200_common *priv);
+static int cw1200_upload_null(struct cw1200_common *priv);
+static int cw1200_upload_qosnull(struct cw1200_common *priv);
+static int cw1200_start_ap(struct cw1200_common *priv);
+static int cw1200_update_beaconing(struct cw1200_common *priv);
+static int cw1200_enable_beaconing(struct cw1200_common *priv,
+                                  bool enable);
+static void __cw1200_sta_notify(struct ieee80211_hw *dev,
+                               struct ieee80211_vif *vif,
+                               enum sta_notify_cmd notify_cmd,
+                               int link_id);
+static int __cw1200_flush(struct cw1200_common *priv, bool drop);
+
+static inline void __cw1200_free_event_queue(struct list_head *list)
+{
+       struct cw1200_wsm_event *event, *tmp;
+       list_for_each_entry_safe(event, tmp, list, link) {
+               list_del(&event->link);
+               kfree(event);
+       }
+}
+
+/* ******************************************************************** */
+/* STA API                                                             */
+
+int cw1200_start(struct ieee80211_hw *dev)
+{
+       struct cw1200_common *priv = dev->priv;
+       int ret = 0;
+
+       cw1200_pm_stay_awake(&priv->pm_state, HZ);
+
+       mutex_lock(&priv->conf_mutex);
+
+       /* default EDCA */
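+       /* Per-AC parameters are AIFS, CWmin, CWmax and TXOP; the same
+        * macro is reused with the mac80211-supplied values in
+        * cw1200_conf_tx() below.
+        */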
+       WSM_EDCA_SET(&priv->edca, 0, 0x0002, 0x0003, 0x0007, 47, 0xc8, false);
+       WSM_EDCA_SET(&priv->edca, 1, 0x0002, 0x0007, 0x000f, 94, 0xc8, false);
+       WSM_EDCA_SET(&priv->edca, 2, 0x0003, 0x000f, 0x03ff, 0, 0xc8, false);
+       WSM_EDCA_SET(&priv->edca, 3, 0x0007, 0x000f, 0x03ff, 0, 0xc8, false);
+       ret = wsm_set_edca_params(priv, &priv->edca);
+       if (ret)
+               goto out;
+
+       ret = cw1200_set_uapsd_param(priv, &priv->edca);
+       if (ret)
+               goto out;
+
+       priv->setbssparams_done = false;
+
+       memcpy(priv->mac_addr, dev->wiphy->perm_addr, ETH_ALEN);
+       priv->mode = NL80211_IFTYPE_MONITOR;
+       priv->wep_default_key_id = -1;
+
+       priv->cqm_beacon_loss_count = 10;
+
+       ret = cw1200_setup_mac(priv);
+       if (ret)
+               goto out;
+
+out:
+       mutex_unlock(&priv->conf_mutex);
+       return ret;
+}
+
+void cw1200_stop(struct ieee80211_hw *dev)
+{
+       struct cw1200_common *priv = dev->priv;
+       LIST_HEAD(list);
+       int i;
+
+       wsm_lock_tx(priv);
+
+       while (down_trylock(&priv->scan.lock)) {
+               /* Scan is in progress. Force it to stop. */
+               priv->scan.req = NULL;
+               schedule();
+       }
+       up(&priv->scan.lock);
+
+       cancel_delayed_work_sync(&priv->scan.probe_work);
+       cancel_delayed_work_sync(&priv->scan.timeout);
+       cancel_delayed_work_sync(&priv->clear_recent_scan_work);
+       cancel_delayed_work_sync(&priv->join_timeout);
+       cw1200_cqm_bssloss_sm(priv, 0, 0, 0);
+       cancel_work_sync(&priv->unjoin_work);
+       cancel_delayed_work_sync(&priv->link_id_gc_work);
+       flush_workqueue(priv->workqueue);
+       del_timer_sync(&priv->mcast_timeout);
+       mutex_lock(&priv->conf_mutex);
+       priv->mode = NL80211_IFTYPE_UNSPECIFIED;
+       priv->listening = false;
+
+       spin_lock(&priv->event_queue_lock);
+       list_splice_init(&priv->event_queue, &list);
+       spin_unlock(&priv->event_queue_lock);
+       __cw1200_free_event_queue(&list);
+
+       priv->join_status = CW1200_JOIN_STATUS_PASSIVE;
+       priv->join_pending = false;
+
+       for (i = 0; i < 4; i++)
+               cw1200_queue_clear(&priv->tx_queue[i]);
+       mutex_unlock(&priv->conf_mutex);
+       tx_policy_clean(priv);
+
+       /* HACK! */
+       if (atomic_xchg(&priv->tx_lock, 1) != 1)
+               pr_debug("[STA] TX is force-unlocked due to stop request.\n");
+
+       wsm_unlock_tx(priv);
+       atomic_xchg(&priv->tx_lock, 0); /* for recovery to work */
+}
+
+static int cw1200_bssloss_mitigation = 1;
+module_param(cw1200_bssloss_mitigation, int, 0644);
+MODULE_PARM_DESC(cw1200_bssloss_mitigation, "BSS Loss mitigation. 0 == disabled, 1 == enabled (default)");
+
+void __cw1200_cqm_bssloss_sm(struct cw1200_common *priv,
+                            int init, int good, int bad)
+{
+       int tx = 0;
+
+       priv->delayed_link_loss = 0;
+       cancel_work_sync(&priv->bss_params_work);
+
+       pr_debug("[STA] CQM BSSLOSS_SM: state: %d init %d good %d bad: %d txlock: %d uj: %d\n",
+                priv->bss_loss_state,
+                init, good, bad,
+                atomic_read(&priv->tx_lock),
+                priv->delayed_unjoin);
+
+       /* If we have a pending unjoin */
+       if (priv->delayed_unjoin)
+               return;
+
+       if (init) {
+               queue_delayed_work(priv->workqueue,
+                                  &priv->bss_loss_work,
+                                  HZ);
+               priv->bss_loss_state = 0;
+
+               /* Skip the confirmation procedure in the P2P case */
+               if (!priv->vif->p2p && !atomic_read(&priv->tx_lock))
+                       tx = 1;
+       } else if (good) {
+               cancel_delayed_work_sync(&priv->bss_loss_work);
+               priv->bss_loss_state = 0;
+               queue_work(priv->workqueue, &priv->bss_params_work);
+       } else if (bad) {
+               /* XXX Should we just keep going until we time out? */
+               if (priv->bss_loss_state < 3)
+                       tx = 1;
+       } else {
+               cancel_delayed_work_sync(&priv->bss_loss_work);
+               priv->bss_loss_state = 0;
+       }
+
+       /* Bypass mitigation if it's disabled */
+       if (!cw1200_bssloss_mitigation)
+               tx = 0;
+
+       /* Spit out a NULL packet to our AP if necessary */
+       if (tx) {
+               struct sk_buff *skb;
+
+               priv->bss_loss_state++;
+
+               skb = ieee80211_nullfunc_get(priv->hw, priv->vif);
+               WARN_ON(!skb);
+               if (skb)
+                       cw1200_tx(priv->hw, NULL, skb);
+       }
+}
+
+int cw1200_add_interface(struct ieee80211_hw *dev,
+                        struct ieee80211_vif *vif)
+{
+       int ret;
+       struct cw1200_common *priv = dev->priv;
+       /* __le32 auto_calibration_mode = __cpu_to_le32(1); */
+
+       vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
+                            IEEE80211_VIF_SUPPORTS_CQM_RSSI;
+
+       mutex_lock(&priv->conf_mutex);
+
+       if (priv->mode != NL80211_IFTYPE_MONITOR) {
+               mutex_unlock(&priv->conf_mutex);
+               return -EOPNOTSUPP;
+       }
+
+       switch (vif->type) {
+       case NL80211_IFTYPE_STATION:
+       case NL80211_IFTYPE_ADHOC:
+       case NL80211_IFTYPE_MESH_POINT:
+       case NL80211_IFTYPE_AP:
+               priv->mode = vif->type;
+               break;
+       default:
+               mutex_unlock(&priv->conf_mutex);
+               return -EOPNOTSUPP;
+       }
+
+       priv->vif = vif;
+       memcpy(priv->mac_addr, vif->addr, ETH_ALEN);
+       ret = cw1200_setup_mac(priv);
+       /* Enable auto-calibration */
+       /* Exception in subsequent channel switch; disabled.
+        *  wsm_write_mib(priv, WSM_MIB_ID_SET_AUTO_CALIBRATION_MODE,
+        *      &auto_calibration_mode, sizeof(auto_calibration_mode));
+       */
+
+       mutex_unlock(&priv->conf_mutex);
+       return ret;
+}
+
+void cw1200_remove_interface(struct ieee80211_hw *dev,
+                            struct ieee80211_vif *vif)
+{
+       struct cw1200_common *priv = dev->priv;
+       struct wsm_reset reset = {
+               .reset_statistics = true,
+       };
+       int i;
+
+       mutex_lock(&priv->conf_mutex);
+       switch (priv->join_status) {
+       case CW1200_JOIN_STATUS_JOINING:
+       case CW1200_JOIN_STATUS_PRE_STA:
+       case CW1200_JOIN_STATUS_STA:
+       case CW1200_JOIN_STATUS_IBSS:
+               wsm_lock_tx(priv);
+               if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0)
+                       wsm_unlock_tx(priv);
+               break;
+       case CW1200_JOIN_STATUS_AP:
+               for (i = 0; priv->link_id_map; ++i) {
+                       if (priv->link_id_map & BIT(i)) {
+                               reset.link_id = i;
+                               wsm_reset(priv, &reset);
+                               priv->link_id_map &= ~BIT(i);
+                       }
+               }
+               memset(priv->link_id_db, 0, sizeof(priv->link_id_db));
+               priv->sta_asleep_mask = 0;
+               priv->enable_beacon = false;
+               priv->tx_multicast = false;
+               priv->aid0_bit_set = false;
+               priv->buffered_multicasts = false;
+               priv->pspoll_mask = 0;
+               reset.link_id = 0;
+               wsm_reset(priv, &reset);
+               break;
+       case CW1200_JOIN_STATUS_MONITOR:
+               cw1200_update_listening(priv, false);
+               break;
+       default:
+               break;
+       }
+       priv->vif = NULL;
+       priv->mode = NL80211_IFTYPE_MONITOR;
+       memset(priv->mac_addr, 0, ETH_ALEN);
+       memset(&priv->p2p_ps_modeinfo, 0, sizeof(priv->p2p_ps_modeinfo));
+       cw1200_free_keys(priv);
+       cw1200_setup_mac(priv);
+       priv->listening = false;
+       priv->join_status = CW1200_JOIN_STATUS_PASSIVE;
+       if (!__cw1200_flush(priv, true))
+               wsm_unlock_tx(priv);
+
+       mutex_unlock(&priv->conf_mutex);
+}
+
+int cw1200_change_interface(struct ieee80211_hw *dev,
+                           struct ieee80211_vif *vif,
+                           enum nl80211_iftype new_type,
+                           bool p2p)
+{
+       int ret = 0;
+       pr_debug("change_interface new: %d (%d), old: %d (%d)\n", new_type,
+                p2p, vif->type, vif->p2p);
+
+       if (new_type != vif->type || vif->p2p != p2p) {
+               cw1200_remove_interface(dev, vif);
+               vif->type = new_type;
+               vif->p2p = p2p;
+               ret = cw1200_add_interface(dev, vif);
+       }
+
+       return ret;
+}
+
+int cw1200_config(struct ieee80211_hw *dev, u32 changed)
+{
+       int ret = 0;
+       struct cw1200_common *priv = dev->priv;
+       struct ieee80211_conf *conf = &dev->conf;
+
+       pr_debug("CONFIG CHANGED:  %08x\n", changed);
+
+       down(&priv->scan.lock);
+       mutex_lock(&priv->conf_mutex);
+       /* TODO: IEEE80211_CONF_CHANGE_QOS */
+       /* TODO: IEEE80211_CONF_CHANGE_LISTEN_INTERVAL */
+
+       if (changed & IEEE80211_CONF_CHANGE_POWER) {
+               priv->output_power = conf->power_level;
+               pr_debug("[STA] TX power: %d\n", priv->output_power);
+               wsm_set_output_power(priv, priv->output_power * 10);
+       }
+
+       if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) &&
+           (priv->channel != conf->chandef.chan)) {
+               struct ieee80211_channel *ch = conf->chandef.chan;
+               struct wsm_switch_channel channel = {
+                       .channel_number = ch->hw_value,
+               };
+               pr_debug("[STA] Freq %d (wsm ch: %d).\n",
+                        ch->center_freq, ch->hw_value);
+
+               /* __cw1200_flush() implicitly locks tx, if successful */
+               if (!__cw1200_flush(priv, false)) {
+                       if (!wsm_switch_channel(priv, &channel)) {
+                               ret = wait_event_timeout(priv->channel_switch_done,
+                                                        !priv->channel_switch_in_progress,
+                                                        3 * HZ);
+                               if (ret) {
+                                       /* Already unlocks if successful */
+                                       priv->channel = ch;
+                                       ret = 0;
+                               } else {
+                                       ret = -ETIMEDOUT;
+                               }
+                       } else {
+                               /* Unlock if switch channel fails */
+                               wsm_unlock_tx(priv);
+                       }
+               }
+       }
+
+       if (changed & IEEE80211_CONF_CHANGE_PS) {
+               if (!(conf->flags & IEEE80211_CONF_PS))
+                       priv->powersave_mode.mode = WSM_PSM_ACTIVE;
+               else if (conf->dynamic_ps_timeout <= 0)
+                       priv->powersave_mode.mode = WSM_PSM_PS;
+               else
+                       priv->powersave_mode.mode = WSM_PSM_FAST_PS;
+
+               /* Firmware requires that the value of this 1-byte field be
+                * specified in units of 500us. Values above the 128ms
+                * threshold are not supported.
+                */
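+               /* dynamic_ps_timeout is in ms; doubling it gives 500-us units
+                * (e.g. 100 ms -> 200). 128 ms or more would not fit in the
+                * byte, hence the 0xFF clamp below.
+                */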
+               if (conf->dynamic_ps_timeout >= 0x80)
+                       priv->powersave_mode.fast_psm_idle_period = 0xFF;
+               else
+                       priv->powersave_mode.fast_psm_idle_period =
+                                       conf->dynamic_ps_timeout << 1;
+
+               if (priv->join_status == CW1200_JOIN_STATUS_STA &&
+                   priv->bss_params.aid)
+                       cw1200_set_pm(priv, &priv->powersave_mode);
+       }
+
+       if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+               /* TBD: It looks like it's transparent. The flag only tells us
+                * that a monitor interface is present -- use this to determine,
+                * for example, whether to calculate timestamps for packets or
+                * not; do not use it instead of filter flags!
+                */
+       }
+
+       if (changed & IEEE80211_CONF_CHANGE_IDLE) {
+               struct wsm_operational_mode mode = {
+                       .power_mode = cw1200_power_mode,
+                       .disable_more_flag_usage = true,
+               };
+
+               wsm_lock_tx(priv);
+               /* Disable p2p-dev mode forced by TX request */
+               if ((priv->join_status == CW1200_JOIN_STATUS_MONITOR) &&
+                   (conf->flags & IEEE80211_CONF_IDLE) &&
+                   !priv->listening) {
+                       cw1200_disable_listening(priv);
+                       priv->join_status = CW1200_JOIN_STATUS_PASSIVE;
+               }
+               wsm_set_operational_mode(priv, &mode);
+               wsm_unlock_tx(priv);
+       }
+
+       if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
+               pr_debug("[STA] Retry limits: %d (long), %d (short).\n",
+                        conf->long_frame_max_tx_count,
+                        conf->short_frame_max_tx_count);
+               spin_lock_bh(&priv->tx_policy_cache.lock);
+               priv->long_frame_max_tx_count = conf->long_frame_max_tx_count;
+               priv->short_frame_max_tx_count =
+                       (conf->short_frame_max_tx_count < 0x0F) ?
+                       conf->short_frame_max_tx_count : 0x0F;
+               priv->hw->max_rate_tries = priv->short_frame_max_tx_count;
+               spin_unlock_bh(&priv->tx_policy_cache.lock);
+       }
+       mutex_unlock(&priv->conf_mutex);
+       up(&priv->scan.lock);
+       return ret;
+}
+
+void cw1200_update_filtering(struct cw1200_common *priv)
+{
+       int ret;
+       bool bssid_filtering = !priv->rx_filter.bssid;
+       bool is_p2p = priv->vif && priv->vif->p2p;
+       bool is_sta = priv->vif && NL80211_IFTYPE_STATION == priv->vif->type;
+
+       static struct wsm_beacon_filter_control bf_ctrl;
+       static struct wsm_mib_beacon_filter_table bf_tbl = {
+               .entry[0].ie_id = WLAN_EID_VENDOR_SPECIFIC,
+               .entry[0].flags = WSM_BEACON_FILTER_IE_HAS_CHANGED |
+                                       WSM_BEACON_FILTER_IE_NO_LONGER_PRESENT |
+                                       WSM_BEACON_FILTER_IE_HAS_APPEARED,
+               .entry[0].oui[0] = 0x50,
+               .entry[0].oui[1] = 0x6F,
+               .entry[0].oui[2] = 0x9A,
+               .entry[1].ie_id = WLAN_EID_HT_OPERATION,
+               .entry[1].flags = WSM_BEACON_FILTER_IE_HAS_CHANGED |
+                                       WSM_BEACON_FILTER_IE_NO_LONGER_PRESENT |
+                                       WSM_BEACON_FILTER_IE_HAS_APPEARED,
+               .entry[2].ie_id = WLAN_EID_ERP_INFO,
+               .entry[2].flags = WSM_BEACON_FILTER_IE_HAS_CHANGED |
+                                       WSM_BEACON_FILTER_IE_NO_LONGER_PRESENT |
+                                       WSM_BEACON_FILTER_IE_HAS_APPEARED,
+       };
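+       /* Entry 0 matches the Wi-Fi Alliance P2P vendor IE (OUI 50:6F:9A).
+        * bf_tbl.num below selects how many entries are active: none when
+        * beacon filtering is disabled, the first two for p2p or non-STA
+        * interfaces, all three for a plain STA.
+        */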
+
+       if (priv->join_status == CW1200_JOIN_STATUS_PASSIVE)
+               return;
+       else if (priv->join_status == CW1200_JOIN_STATUS_MONITOR)
+               bssid_filtering = false;
+
+       if (priv->disable_beacon_filter) {
+               bf_ctrl.enabled = 0;
+               bf_ctrl.bcn_count = 1;
+               bf_tbl.num = __cpu_to_le32(0);
+       } else if (is_p2p || !is_sta) {
+               bf_ctrl.enabled = WSM_BEACON_FILTER_ENABLE |
+                       WSM_BEACON_FILTER_AUTO_ERP;
+               bf_ctrl.bcn_count = 0;
+               bf_tbl.num = __cpu_to_le32(2);
+       } else {
+               bf_ctrl.enabled = WSM_BEACON_FILTER_ENABLE;
+               bf_ctrl.bcn_count = 0;
+               bf_tbl.num = __cpu_to_le32(3);
+       }
+
+       /* When acting as a p2p client connected to a p2p GO, turn off the
+        * bssid filter in order to receive frames from a different p2p device.
+        *
+        * WARNING: FW dependency!
+        * This can only be used with FW WSM371 and its successors.
+        * In that FW version even with bssid filter turned off,
+        * device will block most of the unwanted frames.
+        */
+       if (is_p2p)
+               bssid_filtering = false;
+
+       ret = wsm_set_rx_filter(priv, &priv->rx_filter);
+       if (!ret)
+               ret = wsm_set_beacon_filter_table(priv, &bf_tbl);
+       if (!ret)
+               ret = wsm_beacon_filter_control(priv, &bf_ctrl);
+       if (!ret)
+               ret = wsm_set_bssid_filtering(priv, bssid_filtering);
+       if (!ret)
+               ret = wsm_set_multicast_filter(priv, &priv->multicast_filter);
+       if (ret)
+               wiphy_err(priv->hw->wiphy,
+                         "Update filtering failed: %d.\n", ret);
+       return;
+}
+
+void cw1200_update_filtering_work(struct work_struct *work)
+{
+       struct cw1200_common *priv =
+               container_of(work, struct cw1200_common,
+                            update_filtering_work);
+
+       cw1200_update_filtering(priv);
+}
+
+void cw1200_set_beacon_wakeup_period_work(struct work_struct *work)
+{
+       struct cw1200_common *priv =
+               container_of(work, struct cw1200_common,
+                            set_beacon_wakeup_period_work);
+
+       wsm_set_beacon_wakeup_period(priv,
+                                    priv->beacon_int * priv->join_dtim_period >
+                                    MAX_BEACON_SKIP_TIME_MS ? 1 :
+                                    priv->join_dtim_period, 0);
+}
+
+u64 cw1200_prepare_multicast(struct ieee80211_hw *hw,
+                            struct netdev_hw_addr_list *mc_list)
+{
+       static u8 broadcast_ipv6[ETH_ALEN] = {
+               0x33, 0x33, 0x00, 0x00, 0x00, 0x01
+       };
+       static u8 broadcast_ipv4[ETH_ALEN] = {
+               0x01, 0x00, 0x5e, 0x00, 0x00, 0x01
+       };
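+       /* These are the MAC mappings of the all-nodes groups (IPv6 ff02::1
+        * and IPv4 224.0.0.1); seeing only them does not count as a real
+        * multicast subscription below.
+        */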
+       struct cw1200_common *priv = hw->priv;
+       struct netdev_hw_addr *ha;
+       int count = 0;
+
+       /* Disable multicast filtering */
+       priv->has_multicast_subscription = false;
+       memset(&priv->multicast_filter, 0x00, sizeof(priv->multicast_filter));
+
+       if (netdev_hw_addr_list_count(mc_list) > WSM_MAX_GRP_ADDRTABLE_ENTRIES)
+               return 0;
+
+       /* Enable if requested */
+       netdev_hw_addr_list_for_each(ha, mc_list) {
+               pr_debug("[STA] multicast: %pM\n", ha->addr);
+               memcpy(&priv->multicast_filter.macaddrs[count],
+                      ha->addr, ETH_ALEN);
+               if (memcmp(ha->addr, broadcast_ipv4, ETH_ALEN) &&
+                   memcmp(ha->addr, broadcast_ipv6, ETH_ALEN))
+                       priv->has_multicast_subscription = true;
+               count++;
+       }
+
+       if (count) {
+               priv->multicast_filter.enable = __cpu_to_le32(1);
+               priv->multicast_filter.num_addrs = __cpu_to_le32(count);
+       }
+
+       return netdev_hw_addr_list_count(mc_list);
+}
+
+void cw1200_configure_filter(struct ieee80211_hw *dev,
+                            unsigned int changed_flags,
+                            unsigned int *total_flags,
+                            u64 multicast)
+{
+       struct cw1200_common *priv = dev->priv;
+       bool listening = !!(*total_flags &
+                           (FIF_PROMISC_IN_BSS |
+                            FIF_OTHER_BSS |
+                            FIF_BCN_PRBRESP_PROMISC |
+                            FIF_PROBE_REQ));
+
+       *total_flags &= FIF_PROMISC_IN_BSS |
+                       FIF_OTHER_BSS |
+                       FIF_FCSFAIL |
+                       FIF_BCN_PRBRESP_PROMISC |
+                       FIF_PROBE_REQ;
+
+       down(&priv->scan.lock);
+       mutex_lock(&priv->conf_mutex);
+
+       priv->rx_filter.promiscuous = (*total_flags & FIF_PROMISC_IN_BSS)
+                       ? 1 : 0;
+       priv->rx_filter.bssid = (*total_flags & (FIF_OTHER_BSS |
+                       FIF_PROBE_REQ)) ? 1 : 0;
+       priv->rx_filter.fcs = (*total_flags & FIF_FCSFAIL) ? 1 : 0;
+       priv->disable_beacon_filter = !(*total_flags &
+                                       (FIF_BCN_PRBRESP_PROMISC |
+                                        FIF_PROMISC_IN_BSS |
+                                        FIF_PROBE_REQ));
+       if (priv->listening != listening) {
+               priv->listening = listening;
+               wsm_lock_tx(priv);
+               cw1200_update_listening(priv, listening);
+               wsm_unlock_tx(priv);
+       }
+       cw1200_update_filtering(priv);
+       mutex_unlock(&priv->conf_mutex);
+       up(&priv->scan.lock);
+}
+
+int cw1200_conf_tx(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
+                  u16 queue, const struct ieee80211_tx_queue_params *params)
+{
+       struct cw1200_common *priv = dev->priv;
+       int ret = 0;
+       /* To prevent re-applying the PM request OID again and again */
+       u16 old_uapsd_flags;
+
+       mutex_lock(&priv->conf_mutex);
+
+       if (queue < dev->queues) {
+               old_uapsd_flags = le16_to_cpu(priv->uapsd_info.uapsd_flags);
+
+               WSM_TX_QUEUE_SET(&priv->tx_queue_params, queue, 0, 0, 0);
+               ret = wsm_set_tx_queue_params(priv,
+                                             &priv->tx_queue_params.params[queue], queue);
+               if (ret) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+
+               WSM_EDCA_SET(&priv->edca, queue, params->aifs,
+                            params->cw_min, params->cw_max,
+                            params->txop, 0xc8,
+                            params->uapsd);
+               ret = wsm_set_edca_params(priv, &priv->edca);
+               if (ret) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+
+               if (priv->mode == NL80211_IFTYPE_STATION) {
+                       ret = cw1200_set_uapsd_param(priv, &priv->edca);
+                       if (!ret && priv->setbssparams_done &&
+                           (priv->join_status == CW1200_JOIN_STATUS_STA) &&
+                           (old_uapsd_flags != le16_to_cpu(priv->uapsd_info.uapsd_flags)))
+                               ret = cw1200_set_pm(priv, &priv->powersave_mode);
+               }
+       } else {
+               ret = -EINVAL;
+       }
+
+out:
+       mutex_unlock(&priv->conf_mutex);
+       return ret;
+}
+
+int cw1200_get_stats(struct ieee80211_hw *dev,
+                    struct ieee80211_low_level_stats *stats)
+{
+       struct cw1200_common *priv = dev->priv;
+
+       memcpy(stats, &priv->stats, sizeof(*stats));
+       return 0;
+}
+
+int cw1200_set_pm(struct cw1200_common *priv, const struct wsm_set_pm *arg)
+{
+       struct wsm_set_pm pm = *arg;
+
+       if (priv->uapsd_info.uapsd_flags != 0)
+               pm.mode &= ~WSM_PSM_FAST_PS_FLAG;
+
+       if (memcmp(&pm, &priv->firmware_ps_mode,
+                  sizeof(struct wsm_set_pm))) {
+               priv->firmware_ps_mode = pm;
+               return wsm_set_pm(priv, &pm);
+       } else {
+               return 0;
+       }
+}
+
+int cw1200_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
+                  struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+                  struct ieee80211_key_conf *key)
+{
+       int ret = -EOPNOTSUPP;
+       struct cw1200_common *priv = dev->priv;
+       struct ieee80211_key_seq seq;
+
+       mutex_lock(&priv->conf_mutex);
+
+       if (cmd == SET_KEY) {
+               u8 *peer_addr = NULL;
+               int pairwise = (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) ?
+                       1 : 0;
+               int idx = cw1200_alloc_key(priv);
+               struct wsm_add_key *wsm_key = &priv->keys[idx];
+
+               if (idx < 0) {
+                       ret = -EINVAL;
+                       goto finally;
+               }
+
+               if (sta)
+                       peer_addr = sta->addr;
+
+               key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+
+               switch (key->cipher) {
+               case WLAN_CIPHER_SUITE_WEP40:
+               case WLAN_CIPHER_SUITE_WEP104:
+                       if (key->keylen > 16) {
+                               cw1200_free_key(priv, idx);
+                               ret = -EINVAL;
+                               goto finally;
+                       }
+
+                       if (pairwise) {
+                               wsm_key->type = WSM_KEY_TYPE_WEP_PAIRWISE;
+                               memcpy(wsm_key->wep_pairwise.peer,
+                                      peer_addr, ETH_ALEN);
+                               memcpy(wsm_key->wep_pairwise.keydata,
+                                      &key->key[0], key->keylen);
+                               wsm_key->wep_pairwise.keylen = key->keylen;
+                       } else {
+                               wsm_key->type = WSM_KEY_TYPE_WEP_DEFAULT;
+                               memcpy(wsm_key->wep_group.keydata,
+                                      &key->key[0], key->keylen);
+                               wsm_key->wep_group.keylen = key->keylen;
+                               wsm_key->wep_group.keyid = key->keyidx;
+                       }
+                       break;
+               case WLAN_CIPHER_SUITE_TKIP:
+                       ieee80211_get_key_rx_seq(key, 0, &seq);
+                       if (pairwise) {
+                               wsm_key->type = WSM_KEY_TYPE_TKIP_PAIRWISE;
+                               memcpy(wsm_key->tkip_pairwise.peer,
+                                      peer_addr, ETH_ALEN);
+                               memcpy(wsm_key->tkip_pairwise.keydata,
+                                      &key->key[0], 16);
+                               memcpy(wsm_key->tkip_pairwise.tx_mic_key,
+                                      &key->key[16], 8);
+                               memcpy(wsm_key->tkip_pairwise.rx_mic_key,
+                                      &key->key[24], 8);
+                       } else {
+                               size_t mic_offset =
+                                       (priv->mode == NL80211_IFTYPE_AP) ?
+                                       16 : 24;
+                               wsm_key->type = WSM_KEY_TYPE_TKIP_GROUP;
+                               memcpy(wsm_key->tkip_group.keydata,
+                                      &key->key[0], 16);
+                               memcpy(wsm_key->tkip_group.rx_mic_key,
+                                      &key->key[mic_offset], 8);
+
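+                               /* Pack the TSC little-endian: IV16 into
+                                * bytes 0-1, IV32 into bytes 2-5.
+                                */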
+                               wsm_key->tkip_group.rx_seqnum[0] = seq.tkip.iv16 & 0xff;
+                               wsm_key->tkip_group.rx_seqnum[1] = (seq.tkip.iv16 >> 8) & 0xff;
+                               wsm_key->tkip_group.rx_seqnum[2] = seq.tkip.iv32 & 0xff;
+                               wsm_key->tkip_group.rx_seqnum[3] = (seq.tkip.iv32 >> 8) & 0xff;
+                               wsm_key->tkip_group.rx_seqnum[4] = (seq.tkip.iv32 >> 16) & 0xff;
+                               wsm_key->tkip_group.rx_seqnum[5] = (seq.tkip.iv32 >> 24) & 0xff;
+                               wsm_key->tkip_group.rx_seqnum[6] = 0;
+                               wsm_key->tkip_group.rx_seqnum[7] = 0;
+
+                               wsm_key->tkip_group.keyid = key->keyidx;
+                       }
+                       break;
+               case WLAN_CIPHER_SUITE_CCMP:
+                       ieee80211_get_key_rx_seq(key, 0, &seq);
+                       if (pairwise) {
+                               wsm_key->type = WSM_KEY_TYPE_AES_PAIRWISE;
+                               memcpy(wsm_key->aes_pairwise.peer,
+                                      peer_addr, ETH_ALEN);
+                               memcpy(wsm_key->aes_pairwise.keydata,
+                                      &key->key[0], 16);
+                       } else {
+                               wsm_key->type = WSM_KEY_TYPE_AES_GROUP;
+                               memcpy(wsm_key->aes_group.keydata,
+                                      &key->key[0], 16);
+
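+                               /* Reverse the PN byte order for the
+                                * firmware's rx_seqnum layout.
+                                */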
+                               wsm_key->aes_group.rx_seqnum[0] = seq.ccmp.pn[5];
+                               wsm_key->aes_group.rx_seqnum[1] = seq.ccmp.pn[4];
+                               wsm_key->aes_group.rx_seqnum[2] = seq.ccmp.pn[3];
+                               wsm_key->aes_group.rx_seqnum[3] = seq.ccmp.pn[2];
+                               wsm_key->aes_group.rx_seqnum[4] = seq.ccmp.pn[1];
+                               wsm_key->aes_group.rx_seqnum[5] = seq.ccmp.pn[0];
+                               wsm_key->aes_group.rx_seqnum[6] = 0;
+                               wsm_key->aes_group.rx_seqnum[7] = 0;
+                               wsm_key->aes_group.keyid = key->keyidx;
+                       }
+                       break;
+               case WLAN_CIPHER_SUITE_SMS4:
+                       if (pairwise) {
+                               wsm_key->type = WSM_KEY_TYPE_WAPI_PAIRWISE;
+                               memcpy(wsm_key->wapi_pairwise.peer,
+                                      peer_addr, ETH_ALEN);
+                               memcpy(wsm_key->wapi_pairwise.keydata,
+                                      &key->key[0], 16);
+                               memcpy(wsm_key->wapi_pairwise.mic_key,
+                                      &key->key[16], 16);
+                               wsm_key->wapi_pairwise.keyid = key->keyidx;
+                       } else {
+                               wsm_key->type = WSM_KEY_TYPE_WAPI_GROUP;
+                               memcpy(wsm_key->wapi_group.keydata,
+                                      &key->key[0],  16);
+                               memcpy(wsm_key->wapi_group.mic_key,
+                                      &key->key[16], 16);
+                               wsm_key->wapi_group.keyid = key->keyidx;
+                       }
+                       break;
+               default:
+                       pr_warn("Unhandled key type %d\n", key->cipher);
+                       cw1200_free_key(priv, idx);
+                       ret = -EOPNOTSUPP;
+                       goto finally;
+               }
+               ret = wsm_add_key(priv, wsm_key);
+               if (!ret)
+                       key->hw_key_idx = idx;
+               else
+                       cw1200_free_key(priv, idx);
+       } else if (cmd == DISABLE_KEY) {
+               struct wsm_remove_key wsm_key = {
+                       .index = key->hw_key_idx,
+               };
+
+               if (wsm_key.index > WSM_KEY_MAX_INDEX) {
+                       ret = -EINVAL;
+                       goto finally;
+               }
+
+               cw1200_free_key(priv, wsm_key.index);
+               ret = wsm_remove_key(priv, &wsm_key);
+       } else {
+               pr_warn("Unhandled key command %d\n", cmd);
+       }
+
+finally:
+       mutex_unlock(&priv->conf_mutex);
+       return ret;
+}
+
+void cw1200_wep_key_work(struct work_struct *work)
+{
+       struct cw1200_common *priv =
+               container_of(work, struct cw1200_common, wep_key_work);
+       u8 queue_id = cw1200_queue_get_queue_id(priv->pending_frame_id);
+       struct cw1200_queue *queue = &priv->tx_queue[queue_id];
+       __le32 wep_default_key_id = __cpu_to_le32(
+               priv->wep_default_key_id);
+
+       pr_debug("[STA] Setting default WEP key: %d\n",
+                priv->wep_default_key_id);
+       wsm_flush_tx(priv);
+       wsm_write_mib(priv, WSM_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID,
+                     &wep_default_key_id, sizeof(wep_default_key_id));
+       cw1200_queue_requeue(queue, priv->pending_frame_id);
+       wsm_unlock_tx(priv);
+}
+
+int cw1200_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+       int ret = 0;
+       __le32 val32;
+       struct cw1200_common *priv = hw->priv;
+
+       if (priv->mode == NL80211_IFTYPE_UNSPECIFIED)
+               return 0;
+
+       if (value != (u32) -1)
+               val32 = __cpu_to_le32(value);
+       else
+               val32 = 0; /* disabled */
+
+       if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) {
+               /* device is down, can _not_ set threshold */
+               ret = -ENODEV;
+               goto out;
+       }
+
+       if (priv->rts_threshold == value)
+               goto out;
+
+       pr_debug("[STA] Setting RTS threshold: %d\n", value);
+
+       /* mutex_lock(&priv->conf_mutex); */
+       ret = wsm_write_mib(priv, WSM_MIB_ID_DOT11_RTS_THRESHOLD,
+                           &val32, sizeof(val32));
+       if (!ret)
+               priv->rts_threshold = value;
+       /* mutex_unlock(&priv->conf_mutex); */
+
+out:
+       return ret;
+}
+
+/* If successful, LOCKS the TX queue! */
+static int __cw1200_flush(struct cw1200_common *priv, bool drop)
+{
+       int i, ret;
+
+       for (;;) {
+               /* TODO: correct flush handling is required for dev_stop.
+                * Temporary workaround: wait up to 2s.
+                */
+               if (drop) {
+                       for (i = 0; i < 4; ++i)
+                               cw1200_queue_clear(&priv->tx_queue[i]);
+               } else {
+                       ret = wait_event_timeout(
+                               priv->tx_queue_stats.wait_link_id_empty,
+                               cw1200_queue_stats_is_empty(
+                                       &priv->tx_queue_stats, -1),
+                               2 * HZ);
+               }
+
+               if (!drop && ret <= 0) {
+                       ret = -ETIMEDOUT;
+                       break;
+               } else {
+                       ret = 0;
+               }
+
+               wsm_lock_tx(priv);
+               if (!cw1200_queue_stats_is_empty(&priv->tx_queue_stats, -1)) {
+                       /* Highly unlikely: WSM requeued frames. */
+                       wsm_unlock_tx(priv);
+                       continue;
+               }
+               break;
+       }
+       return ret;
+}
+
+void cw1200_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
+{
+       struct cw1200_common *priv = hw->priv;
+
+       switch (priv->mode) {
+       case NL80211_IFTYPE_MONITOR:
+               drop = true;
+               break;
+       case NL80211_IFTYPE_AP:
+               if (!priv->enable_beacon)
+                       drop = true;
+               break;
+       }
+
+       if (!__cw1200_flush(priv, drop))
+               wsm_unlock_tx(priv);
+
+       return;
+}
+
+/* ******************************************************************** */
+/* WSM callbacks                                                       */
+
+void cw1200_free_event_queue(struct cw1200_common *priv)
+{
+       LIST_HEAD(list);
+
+       spin_lock(&priv->event_queue_lock);
+       list_splice_init(&priv->event_queue, &list);
+       spin_unlock(&priv->event_queue_lock);
+
+       __cw1200_free_event_queue(&list);
+}
+
+void cw1200_event_handler(struct work_struct *work)
+{
+       struct cw1200_common *priv =
+               container_of(work, struct cw1200_common, event_handler);
+       struct cw1200_wsm_event *event;
+       LIST_HEAD(list);
+
+       spin_lock(&priv->event_queue_lock);
+       list_splice_init(&priv->event_queue, &list);
+       spin_unlock(&priv->event_queue_lock);
+
+       list_for_each_entry(event, &list, link) {
+               switch (event->evt.id) {
+               case WSM_EVENT_ERROR:
+                       pr_err("Unhandled WSM Error from LMAC\n");
+                       break;
+               case WSM_EVENT_BSS_LOST:
+                       pr_debug("[CQM] BSS lost.\n");
+                       cancel_work_sync(&priv->unjoin_work);
+                       if (!down_trylock(&priv->scan.lock)) {
+                               cw1200_cqm_bssloss_sm(priv, 1, 0, 0);
+                               up(&priv->scan.lock);
+                       } else {
+                               /* Scan is in progress. Delay reporting.
+                                * Scan complete will trigger bss_loss_work
+                                */
+                               priv->delayed_link_loss = 1;
+                               /* Also start a watchdog. */
+                               queue_delayed_work(priv->workqueue,
+                                                  &priv->bss_loss_work, 5*HZ);
+                       }
+                       break;
+               case WSM_EVENT_BSS_REGAINED:
+                       pr_debug("[CQM] BSS regained.\n");
+                       cw1200_cqm_bssloss_sm(priv, 0, 0, 0);
+                       cancel_work_sync(&priv->unjoin_work);
+                       break;
+               case WSM_EVENT_RADAR_DETECTED:
+                       wiphy_info(priv->hw->wiphy, "radar pulse detected\n");
+                       break;
+               case WSM_EVENT_RCPI_RSSI:
+               {
+                       /* RSSI: signed Q8.0, RCPI: unsigned Q7.1
+                        * RSSI = RCPI / 2 - 110
+                        */
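+                       /* e.g. an RCPI of 120 maps to 120 / 2 - 110 = -50 dBm. */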
+                       int rcpi_rssi = (int)(event->evt.data & 0xFF);
+                       int cqm_evt;
+                       if (priv->cqm_use_rssi)
+                               rcpi_rssi = (s8)rcpi_rssi;
+                       else
+                               rcpi_rssi =  rcpi_rssi / 2 - 110;
+
+                       cqm_evt = (rcpi_rssi <= priv->cqm_rssi_thold) ?
+                               NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW :
+                               NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
+                       pr_debug("[CQM] RSSI event: %d.\n", rcpi_rssi);
+                       ieee80211_cqm_rssi_notify(priv->vif, cqm_evt,
+                                                 GFP_KERNEL);
+                       break;
+               }
+               case WSM_EVENT_BT_INACTIVE:
+                       pr_warn("Unhandled BT INACTIVE from LMAC\n");
+                       break;
+               case WSM_EVENT_BT_ACTIVE:
+                       pr_warn("Unhandled BT ACTIVE from LMAC\n");
+                       break;
+               }
+       }
+       __cw1200_free_event_queue(&list);
+}
+
+void cw1200_bss_loss_work(struct work_struct *work)
+{
+       struct cw1200_common *priv =
+               container_of(work, struct cw1200_common, bss_loss_work.work);
+
+       pr_debug("[CQM] Reporting connection loss.\n");
+       wsm_lock_tx(priv);
+       if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0)
+               wsm_unlock_tx(priv);
+}
+
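+/* Push the current BSS parameters to firmware with reset_beacon_loss
+ * raised for this single update.
+ */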
+void cw1200_bss_params_work(struct work_struct *work)
+{
+       struct cw1200_common *priv =
+               container_of(work, struct cw1200_common, bss_params_work);
+       mutex_lock(&priv->conf_mutex);
+
+       priv->bss_params.reset_beacon_loss = 1;
+       wsm_set_bss_params(priv, &priv->bss_params);
+       priv->bss_params.reset_beacon_loss = 0;
+
+       mutex_unlock(&priv->conf_mutex);
+}
+
+/* ******************************************************************** */
+/* Internal API                                                                */
+
+/* Parse the SDD file to extract the listen interval and PTA-related
+ * information. The SDD is a sequence of TLVs: u8 id, u8 len, u8 data[].
+ */
+static int cw1200_parse_sdd_file(struct cw1200_common *priv)
+{
+       const u8 *p = priv->sdd->data;
+       int ret = 0;
+
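+       /* Walk the TLVs, making sure each element header and payload stay
+        * within the bounds of the SDD image.
+        */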
+       while (p + 2 <= priv->sdd->data + priv->sdd->size) {
+               if (p + p[1] + 2 > priv->sdd->data + priv->sdd->size) {
+                       pr_warn("Malformed sdd structure\n");
+                       return -1;
+               }
+               switch (p[0]) {
+               case SDD_PTA_CFG_ELT_ID: {
+                       u16 v;
+                       if (p[1] < 4) {
+                               pr_warn("SDD_PTA_CFG_ELT_ID malformed\n");
+                               ret = -1;
+                               break;
+                       }
+                       v = le16_to_cpu(*((__le16 *)(p + 2)));
+                       if (!v)  /* non-zero means this is enabled */
+                               break;
+
+                       v = le16_to_cpu(*((__le16 *)(p + 4)));
+                       priv->conf_listen_interval = (v >> 7) & 0x1F;
+                       pr_debug("PTA found; Listen Interval %d\n",
+                                priv->conf_listen_interval);
+                       break;
+               }
+               case SDD_REFERENCE_FREQUENCY_ELT_ID: {
+                       u16 clk = le16_to_cpu(*((__le16 *)(p + 2)));
+                       if (clk != priv->hw_refclk)
+                               pr_warn("SDD file doesn't match configured refclk (%d vs %d)\n",
+                                       clk, priv->hw_refclk);
+                       break;
+               }
+               default:
+                       break;
+               }
+               p += p[1] + 2;
+       }
+
+       if (!priv->bt_present) {
+               pr_debug("PTA element NOT found.\n");
+               priv->conf_listen_interval = 0;
+       }
+       return ret;
+}
+
+int cw1200_setup_mac(struct cw1200_common *priv)
+{
+       int ret = 0;
+
+       /* NOTE: There is a bug in FW: it reports the signal
+        * as RSSI if RSSI subscription is enabled.
+        * It's not enough to set WSM_RCPI_RSSI_USE_RSSI.
+        *
+        * NOTE2: RSSI-based reports have been switched to RCPI, since
+        * the FW has a bug and the reported RSSI values are not stable,
+        * which can lead to signal-level oscillations in user applications.
+        */
+       struct wsm_rcpi_rssi_threshold threshold = {
+               .rssiRcpiMode = WSM_RCPI_RSSI_THRESHOLD_ENABLE |
+               WSM_RCPI_RSSI_DONT_USE_UPPER |
+               WSM_RCPI_RSSI_DONT_USE_LOWER,
+               .rollingAverageCount = 16,
+       };
+
+       struct wsm_configuration cfg = {
+               .dot11StationId = &priv->mac_addr[0],
+       };
+
+       /* Remember the decision here to make sure we handle
+        * the RCPI/RSSI value correctly on WSM_EVENT_RCPI_RSSI.
+        */
+       if (threshold.rssiRcpiMode & WSM_RCPI_RSSI_USE_RSSI)
+               priv->cqm_use_rssi = true;
+
+       if (!priv->sdd) {
+               ret = request_firmware(&priv->sdd, priv->sdd_path, priv->pdev);
+               if (ret) {
+                       pr_err("Can't load sdd file %s.\n", priv->sdd_path);
+                       return ret;
+               }
+               cw1200_parse_sdd_file(priv);
+       }
+
+       cfg.dpdData = priv->sdd->data;
+       cfg.dpdData_size = priv->sdd->size;
+       ret = wsm_configuration(priv, &cfg);
+       if (ret)
+               return ret;
+
+       /* Configure RSSI/RCPI reporting as RSSI. */
+       wsm_set_rcpi_rssi_threshold(priv, &threshold);
+
+       return 0;
+}
+
+static void cw1200_join_complete(struct cw1200_common *priv)
+{
+       pr_debug("[STA] Join complete (%d)\n", priv->join_complete_status);
+
+       priv->join_pending = false;
+       if (priv->join_complete_status) {
+               priv->join_status = CW1200_JOIN_STATUS_PASSIVE;
+               cw1200_update_listening(priv, priv->listening);
+               cw1200_do_unjoin(priv);
+               ieee80211_connection_loss(priv->vif);
+       } else {
+               if (priv->mode == NL80211_IFTYPE_ADHOC)
+                       priv->join_status = CW1200_JOIN_STATUS_IBSS;
+               else
+                       priv->join_status = CW1200_JOIN_STATUS_PRE_STA;
+       }
+       wsm_unlock_tx(priv); /* Clearing the lock held before do_join() */
+}
+
+void cw1200_join_complete_work(struct work_struct *work)
+{
+       struct cw1200_common *priv =
+               container_of(work, struct cw1200_common, join_complete_work);
+       mutex_lock(&priv->conf_mutex);
+       cw1200_join_complete(priv);
+       mutex_unlock(&priv->conf_mutex);
+}
+
+void cw1200_join_complete_cb(struct cw1200_common *priv,
+                            struct wsm_join_complete *arg)
+{
+       pr_debug("[STA] cw1200_join_complete_cb called, status=%d.\n",
+                arg->status);
+
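+       /* Act on the completion only if the join timeout was still pending;
+        * otherwise the timeout handler has already run.
+        */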
+       if (cancel_delayed_work(&priv->join_timeout)) {
+               priv->join_complete_status = arg->status;
+               queue_work(priv->workqueue, &priv->join_complete_work);
+       }
+}
+
+/* MUST be called with tx_lock held!  It will be unlocked for us. */
+static void cw1200_do_join(struct cw1200_common *priv)
+{
+       const u8 *bssid;
+       struct ieee80211_bss_conf *conf = &priv->vif->bss_conf;
+       struct cfg80211_bss *bss = NULL;
+       struct wsm_protected_mgmt_policy mgmt_policy;
+       struct wsm_join join = {
+               .mode = conf->ibss_joined ?
+                               WSM_JOIN_MODE_IBSS : WSM_JOIN_MODE_BSS,
+               .preamble_type = WSM_JOIN_PREAMBLE_LONG,
+               .probe_for_join = 1,
+               .atim_window = 0,
+               .basic_rate_set = cw1200_rate_mask_to_wsm(priv,
+                                                         conf->basic_rates),
+       };
+       if (delayed_work_pending(&priv->join_timeout)) {
+               pr_warn("[STA] - Join request already pending, skipping..\n");
+               wsm_unlock_tx(priv);
+               return;
+       }
+
+       if (priv->join_status)
+               cw1200_do_unjoin(priv);
+
+       bssid = priv->vif->bss_conf.bssid;
+
+       bss = cfg80211_get_bss(priv->hw->wiphy, priv->channel,
+                       bssid, NULL, 0, 0, 0);
+
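+       /* Without a cfg80211 BSS entry there is nothing to join, unless we
+        * are operating in IBSS mode.
+        */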
+       if (!bss && !conf->ibss_joined) {
+               wsm_unlock_tx(priv);
+               return;
+       }
+
+       mutex_lock(&priv->conf_mutex);
+
+       /* Under the conf lock: check scan status and
+        * bail out if it is in progress.
+        */
+       if (atomic_read(&priv->scan.in_progress)) {
+               wsm_unlock_tx(priv);
+               goto done_put;
+       }
+
+       priv->join_pending = true;
+
+       /* Sanity check basic rates */
+       if (!join.basic_rate_set)
+               join.basic_rate_set = 7;
+
+       /* Sanity check beacon interval */
+       if (!priv->beacon_int)
+               priv->beacon_int = 1;
+
+       join.beacon_interval = priv->beacon_int;
+
+       /* BT Coex related changes */
+       if (priv->bt_present) {
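+               /* Express the configured listen interval as a whole number
+                * of beacon intervals, rounding up.
+                */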
+               if (((priv->conf_listen_interval * 100) %
+                    priv->beacon_int) == 0)
+                       priv->listen_interval =
+                               ((priv->conf_listen_interval * 100) /
+                                priv->beacon_int);
+               else
+                       priv->listen_interval =
+                               ((priv->conf_listen_interval * 100) /
+                                priv->beacon_int + 1);
+       }
+
+       if (priv->hw->conf.ps_dtim_period)
+               priv->join_dtim_period = priv->hw->conf.ps_dtim_period;
+       join.dtim_period = priv->join_dtim_period;
+
+       join.channel_number = priv->channel->hw_value;
+       join.band = (priv->channel->band == IEEE80211_BAND_5GHZ) ?
+               WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G;
+
+       memcpy(join.bssid, bssid, sizeof(join.bssid));
+
+       pr_debug("[STA] Join BSSID: %pM DTIM: %d, interval: %d\n",
+                join.bssid,
+                join.dtim_period, priv->beacon_int);
+
+       if (!conf->ibss_joined) {
+               const u8 *ssidie;
+               rcu_read_lock();
+               ssidie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
+               if (ssidie) {
+                       join.ssid_len = ssidie[1];
+                       memcpy(join.ssid, &ssidie[2], join.ssid_len);
+               }
+               rcu_read_unlock();
+       }
+
+       if (priv->vif->p2p) {
+               join.flags |= WSM_JOIN_FLAGS_P2P_GO;
+               join.basic_rate_set =
+                       cw1200_rate_mask_to_wsm(priv, 0xFF0);
+       }
+
+       /* Enable asynchronous join calls */
+       if (!conf->ibss_joined) {
+               join.flags |= WSM_JOIN_FLAGS_FORCE;
+               join.flags |= WSM_JOIN_FLAGS_FORCE_WITH_COMPLETE_IND;
+       }
+
+       wsm_flush_tx(priv);
+
+       /* Stay Awake for Join and Auth Timeouts and a bit more */
+       cw1200_pm_stay_awake(&priv->pm_state,
+                            CW1200_JOIN_TIMEOUT + CW1200_AUTH_TIMEOUT);
+
+       cw1200_update_listening(priv, false);
+
+       /* Turn on Block ACKs */
+       wsm_set_block_ack_policy(priv, priv->ba_tx_tid_mask,
+                                priv->ba_rx_tid_mask);
+
+       /* Set up timeout */
+       if (join.flags & WSM_JOIN_FLAGS_FORCE_WITH_COMPLETE_IND) {
+               priv->join_status = CW1200_JOIN_STATUS_JOINING;
+               queue_delayed_work(priv->workqueue,
+                                  &priv->join_timeout,
+                                  CW1200_JOIN_TIMEOUT);
+       }
+
+       /* 802.11w protected mgmt frames */
+       mgmt_policy.protectedMgmtEnable = 0;
+       mgmt_policy.unprotectedMgmtFramesAllowed = 1;
+       mgmt_policy.encryptionForAuthFrame = 1;
+       wsm_set_protected_mgmt_policy(priv, &mgmt_policy);
+
+       /* Perform actual join */
+       if (wsm_join(priv, &join)) {
+               pr_err("[STA] cw1200_join_work: wsm_join failed!\n");
+               cancel_delayed_work_sync(&priv->join_timeout);
+               cw1200_update_listening(priv, priv->listening);
+               /* Tx lock still held, unjoin will clear it. */
+               if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0)
+                       wsm_unlock_tx(priv);
+       } else {
+               if (!(join.flags & WSM_JOIN_FLAGS_FORCE_WITH_COMPLETE_IND))
+                       cw1200_join_complete(priv); /* Will clear tx_lock */
+
+               /* Upload keys */
+               cw1200_upload_keys(priv);
+
+               /* Due to beacon filtering it is possible that the
+                * AP's beacon is not known to the mac80211 stack.
+                * Disable filtering temporarily to make sure the stack
+                * receives at least one.
+                */
+               priv->disable_beacon_filter = true;
+       }
+       cw1200_update_filtering(priv);
+
+done_put:
+       mutex_unlock(&priv->conf_mutex);
+       if (bss)
+               cfg80211_put_bss(priv->hw->wiphy, bss);
+}
+
+void cw1200_join_timeout(struct work_struct *work)
+{
+       struct cw1200_common *priv =
+               container_of(work, struct cw1200_common, join_timeout.work);
+       pr_debug("[WSM] Join timed out.\n");
+       wsm_lock_tx(priv);
+       if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0)
+               wsm_unlock_tx(priv);
+}
+
+static void cw1200_do_unjoin(struct cw1200_common *priv)
+{
+       struct wsm_reset reset = {
+               .reset_statistics = true,
+       };
+
+       cancel_delayed_work_sync(&priv->join_timeout);
+
+       mutex_lock(&priv->conf_mutex);
+       priv->join_pending = false;
+
+       if (atomic_read(&priv->scan.in_progress)) {
+               if (priv->delayed_unjoin)
+                       wiphy_dbg(priv->hw->wiphy, "Delayed unjoin is already scheduled.\n");
+               else
+                       priv->delayed_unjoin = true;
+               goto done;
+       }
+
+       priv->delayed_link_loss = false;
+
+       if (!priv->join_status)
+               goto done;
+
+       if (priv->join_status > CW1200_JOIN_STATUS_IBSS) {
+               wiphy_err(priv->hw->wiphy, "Unexpected: join status: %d\n",
+                         priv->join_status);
+               BUG_ON(1);
+       }
+
+       cancel_work_sync(&priv->update_filtering_work);
+       cancel_work_sync(&priv->set_beacon_wakeup_period_work);
+       priv->join_status = CW1200_JOIN_STATUS_PASSIVE;
+
+       /* Unjoin is a reset. */
+       wsm_flush_tx(priv);
+       wsm_keep_alive_period(priv, 0);
+       wsm_reset(priv, &reset);
+       wsm_set_output_power(priv, priv->output_power * 10);
+       priv->join_dtim_period = 0;
+       cw1200_setup_mac(priv);
+       cw1200_free_event_queue(priv);
+       cancel_work_sync(&priv->event_handler);
+       cw1200_update_listening(priv, priv->listening);
+       cw1200_cqm_bssloss_sm(priv, 0, 0, 0);
+
+       /* Disable Block ACKs */
+       wsm_set_block_ack_policy(priv, 0, 0);
+
+       priv->disable_beacon_filter = false;
+       cw1200_update_filtering(priv);
+       memset(&priv->association_mode, 0,
+              sizeof(priv->association_mode));
+       memset(&priv->bss_params, 0, sizeof(priv->bss_params));
+       priv->setbssparams_done = false;
+       memset(&priv->firmware_ps_mode, 0,
+              sizeof(priv->firmware_ps_mode));
+
+       pr_debug("[STA] Unjoin completed.\n");
+
+done:
+       mutex_unlock(&priv->conf_mutex);
+}
+
+void cw1200_unjoin_work(struct work_struct *work)
+{
+       struct cw1200_common *priv =
+               container_of(work, struct cw1200_common, unjoin_work);
+
+       cw1200_do_unjoin(priv);
+
+       /* Tell the stack we're dead */
+       ieee80211_connection_loss(priv->vif);
+
+       wsm_unlock_tx(priv);
+}
+
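+/* Start the device in P2P_DEV mode on the current channel (or channel 1
+ * in the 2.4 GHz band if no channel is set) so it can listen while not
+ * joined to a BSS.
+ */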
+int cw1200_enable_listening(struct cw1200_common *priv)
+{
+       struct wsm_start start = {
+               .mode = WSM_START_MODE_P2P_DEV,
+               .band = WSM_PHY_BAND_2_4G,
+               .beacon_interval = 100,
+               .dtim_period = 1,
+               .probe_delay = 0,
+               .basic_rate_set = 0x0F,
+       };
+
+       if (priv->channel) {
+               start.band = priv->channel->band == IEEE80211_BAND_5GHZ ?
+                            WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G;
+               start.channel_number = priv->channel->hw_value;
+       } else {
+               start.band = WSM_PHY_BAND_2_4G;
+               start.channel_number = 1;
+       }
+
+       return wsm_start(priv, &start);
+}
+
+int cw1200_disable_listening(struct cw1200_common *priv)
+{
+       int ret;
+       struct wsm_reset reset = {
+               .reset_statistics = true,
+       };
+       ret = wsm_reset(priv, &reset);
+       return ret;
+}
+
+void cw1200_update_listening(struct cw1200_common *priv, bool enabled)
+{
+       if (enabled) {
+               if (priv->join_status == CW1200_JOIN_STATUS_PASSIVE) {
+                       if (!cw1200_enable_listening(priv))
+                               priv->join_status = CW1200_JOIN_STATUS_MONITOR;
+                       wsm_set_probe_responder(priv, true);
+               }
+       } else {
+               if (priv->join_status == CW1200_JOIN_STATUS_MONITOR) {
+                       if (!cw1200_disable_listening(priv))
+                               priv->join_status = CW1200_JOIN_STATUS_PASSIVE;
+                       wsm_set_probe_responder(priv, false);
+               }
+       }
+}
+
+int cw1200_set_uapsd_param(struct cw1200_common *priv,
+                          const struct wsm_edca_params *arg)
+{
+       int ret;
+       u16 uapsd_flags = 0;
+
+       /* Here's the mapping of AC to [queue, bit]:
+        * VO [0, 3], VI [1, 2], BE [2, 1], BK [3, 0]
+        */
+
+       if (arg->uapsd_enable[0])
+               uapsd_flags |= 1 << 3;
+
+       if (arg->uapsd_enable[1])
+               uapsd_flags |= 1 << 2;
+
+       if (arg->uapsd_enable[2])
+               uapsd_flags |= 1 << 1;
+
+       if (arg->uapsd_enable[3])
+               uapsd_flags |= 1;
+
+       /* Pseudo U-APSD operation is currently not supported, so
+        * MinAutoTriggerInterval, MaxAutoTriggerInterval and
+        * AutoTriggerStep are all set to 0.
+        */
+
+       priv->uapsd_info.uapsd_flags = cpu_to_le16(uapsd_flags);
+       priv->uapsd_info.min_auto_trigger_interval = 0;
+       priv->uapsd_info.max_auto_trigger_interval = 0;
+       priv->uapsd_info.auto_trigger_step = 0;
+
+       ret = wsm_set_uapsd_info(priv, &priv->uapsd_info);
+       return ret;
+}
+
+/* ******************************************************************** */
+/* AP API                                                              */
+
+int cw1200_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                  struct ieee80211_sta *sta)
+{
+       struct cw1200_common *priv = hw->priv;
+       struct cw1200_sta_priv *sta_priv =
+                       (struct cw1200_sta_priv *)&sta->drv_priv;
+       struct cw1200_link_entry *entry;
+       struct sk_buff *skb;
+
+       if (priv->mode != NL80211_IFTYPE_AP)
+               return 0;
+
+       sta_priv->link_id = cw1200_find_link_id(priv, sta->addr);
+       if (WARN_ON(!sta_priv->link_id)) {
+               wiphy_info(priv->hw->wiphy,
+                          "[AP] No more link IDs available.\n");
+               return -ENOENT;
+       }
+
+       entry = &priv->link_id_db[sta_priv->link_id - 1];
+       spin_lock_bh(&priv->ps_state_lock);
+       if ((sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK) ==
+                                       IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
+               priv->sta_asleep_mask |= BIT(sta_priv->link_id);
+       entry->status = CW1200_LINK_HARD;
+       while ((skb = skb_dequeue(&entry->rx_queue)))
+               ieee80211_rx_irqsafe(priv->hw, skb);
+       spin_unlock_bh(&priv->ps_state_lock);
+       return 0;
+}
+
+int cw1200_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                     struct ieee80211_sta *sta)
+{
+       struct cw1200_common *priv = hw->priv;
+       struct cw1200_sta_priv *sta_priv =
+                       (struct cw1200_sta_priv *)&sta->drv_priv;
+       struct cw1200_link_entry *entry;
+
+       if (priv->mode != NL80211_IFTYPE_AP || !sta_priv->link_id)
+               return 0;
+
+       entry = &priv->link_id_db[sta_priv->link_id - 1];
+       spin_lock_bh(&priv->ps_state_lock);
+       entry->status = CW1200_LINK_RESERVE;
+       entry->timestamp = jiffies;
+       wsm_lock_tx_async(priv);
+       if (queue_work(priv->workqueue, &priv->link_id_work) <= 0)
+               wsm_unlock_tx(priv);
+       spin_unlock_bh(&priv->ps_state_lock);
+       flush_workqueue(priv->workqueue);
+       return 0;
+}
+
+static void __cw1200_sta_notify(struct ieee80211_hw *dev,
+                               struct ieee80211_vif *vif,
+                               enum sta_notify_cmd notify_cmd,
+                               int link_id)
+{
+       struct cw1200_common *priv = dev->priv;
+       u32 bit, prev;
+
+       /* Zero link id means "for all link IDs" */
+       if (link_id)
+               bit = BIT(link_id);
+       else if (WARN_ON_ONCE(notify_cmd != STA_NOTIFY_AWAKE))
+               bit = 0;
+       else
+               bit = priv->link_id_map;
+       prev = priv->sta_asleep_mask & bit;
+
+       switch (notify_cmd) {
+       case STA_NOTIFY_SLEEP:
+               if (!prev) {
+                       if (priv->buffered_multicasts &&
+                           !priv->sta_asleep_mask)
+                               queue_work(priv->workqueue,
+                                          &priv->multicast_start_work);
+                       priv->sta_asleep_mask |= bit;
+               }
+               break;
+       case STA_NOTIFY_AWAKE:
+               if (prev) {
+                       priv->sta_asleep_mask &= ~bit;
+                       priv->pspoll_mask &= ~bit;
+                       if (priv->tx_multicast && link_id &&
+                           !priv->sta_asleep_mask)
+                               queue_work(priv->workqueue,
+                                          &priv->multicast_stop_work);
+                       cw1200_bh_wakeup(priv);
+               }
+               break;
+       }
+}
+
+void cw1200_sta_notify(struct ieee80211_hw *dev,
+                      struct ieee80211_vif *vif,
+                      enum sta_notify_cmd notify_cmd,
+                      struct ieee80211_sta *sta)
+{
+       struct cw1200_common *priv = dev->priv;
+       struct cw1200_sta_priv *sta_priv =
+               (struct cw1200_sta_priv *)&sta->drv_priv;
+
+       spin_lock_bh(&priv->ps_state_lock);
+       __cw1200_sta_notify(dev, vif, notify_cmd, sta_priv->link_id);
+       spin_unlock_bh(&priv->ps_state_lock);
+}
+
+static void cw1200_ps_notify(struct cw1200_common *priv,
+                     int link_id, bool ps)
+{
+       if (link_id > CW1200_MAX_STA_IN_AP_MODE)
+               return;
+
+       pr_debug("%s for LinkId: %d. STAs asleep: %.8X\n",
+                ps ? "Stop" : "Start",
+                link_id, priv->sta_asleep_mask);
+
+       __cw1200_sta_notify(priv->hw, priv->vif,
+                           ps ? STA_NOTIFY_SLEEP : STA_NOTIFY_AWAKE, link_id);
+}
+
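+/* Rebuild the beacon TIM element and push it to firmware, with the AID0
+ * (multicast) bit set or cleared as requested.
+ */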
+static int cw1200_set_tim_impl(struct cw1200_common *priv, bool aid0_bit_set)
+{
+       struct sk_buff *skb;
+       struct wsm_update_ie update_ie = {
+               .what = WSM_UPDATE_IE_BEACON,
+               .count = 1,
+       };
+       u16 tim_offset, tim_length;
+
+       pr_debug("[AP] mcast: %s.\n", aid0_bit_set ? "ena" : "dis");
+
+       skb = ieee80211_beacon_get_tim(priv->hw, priv->vif,
+                       &tim_offset, &tim_length);
+       if (!skb) {
+               if (!__cw1200_flush(priv, true))
+                       wsm_unlock_tx(priv);
+               return -ENOENT;
+       }
+
+       if (tim_offset && tim_length >= 6) {
+               /* Ignore DTIM count from mac80211:
+                * firmware handles DTIM internally.
+                */
+               skb->data[tim_offset + 2] = 0;
+
+               /* Set/reset aid0 bit */
+               if (aid0_bit_set)
+                       skb->data[tim_offset + 4] |= 1;
+               else
+                       skb->data[tim_offset + 4] &= ~1;
+       }
+
+       update_ie.ies = &skb->data[tim_offset];
+       update_ie.length = tim_length;
+       wsm_update_ie(priv, &update_ie);
+
+       dev_kfree_skb(skb);
+
+       return 0;
+}
+
+void cw1200_set_tim_work(struct work_struct *work)
+{
+       struct cw1200_common *priv =
+               container_of(work, struct cw1200_common, set_tim_work);
+       (void)cw1200_set_tim_impl(priv, priv->aid0_bit_set);
+}
+
+int cw1200_set_tim(struct ieee80211_hw *dev, struct ieee80211_sta *sta,
+                  bool set)
+{
+       struct cw1200_common *priv = dev->priv;
+       queue_work(priv->workqueue, &priv->set_tim_work);
+       return 0;
+}
+
+void cw1200_set_cts_work(struct work_struct *work)
+{
+       struct cw1200_common *priv =
+               container_of(work, struct cw1200_common, set_cts_work);
+
+       u8 erp_ie[3] = {WLAN_EID_ERP_INFO, 0x1, 0};
+       struct wsm_update_ie update_ie = {
+               .what = WSM_UPDATE_IE_BEACON,
+               .count = 1,
+               .ies = erp_ie,
+               .length = 3,
+       };
+       u32 erp_info;
+       __le32 use_cts_prot;
+       mutex_lock(&priv->conf_mutex);
+       erp_info = priv->erp_info;
+       mutex_unlock(&priv->conf_mutex);
+       use_cts_prot =
+               erp_info & WLAN_ERP_USE_PROTECTION ?
+               __cpu_to_le32(1) : 0;
+
+       erp_ie[ERP_INFO_BYTE_OFFSET] = erp_info;
+
+       pr_debug("[STA] ERP information 0x%x\n", erp_info);
+
+       wsm_write_mib(priv, WSM_MIB_ID_NON_ERP_PROTECTION,
+                     &use_cts_prot, sizeof(use_cts_prot));
+       wsm_update_ie(priv, &update_ie);
+
+       return;
+}
+
+static int cw1200_set_btcoexinfo(struct cw1200_common *priv)
+{
+       struct wsm_override_internal_txrate arg;
+       int ret = 0;
+
+       if (priv->mode == NL80211_IFTYPE_STATION) {
+               /* Plumb PSPOLL and NULL template */
+               cw1200_upload_pspoll(priv);
+               cw1200_upload_null(priv);
+               cw1200_upload_qosnull(priv);
+       } else {
+               return 0;
+       }
+
+       memset(&arg, 0, sizeof(struct wsm_override_internal_txrate));
+
+       if (!priv->vif->p2p) {
+               /* STATION mode */
+               if (priv->bss_params.operational_rate_set & ~0xF) {
+                       pr_debug("[STA] STA has ERP rates\n");
+                       /* G or BG mode */
+                       arg.internalTxRate = (__ffs(
+                       priv->bss_params.operational_rate_set & ~0xF));
+               } else {
+                       pr_debug("[STA] STA has non ERP rates\n");
+                       /* B only mode */
+                       arg.internalTxRate = (__ffs(le32_to_cpu(priv->association_mode.basic_rate_set)));
+               }
+               arg.nonErpInternalTxRate = (__ffs(le32_to_cpu(priv->association_mode.basic_rate_set)));
+       } else {
+               /* P2P mode */
+               arg.internalTxRate = (__ffs(priv->bss_params.operational_rate_set & ~0xF));
+               arg.nonErpInternalTxRate = (__ffs(priv->bss_params.operational_rate_set & ~0xF));
+       }
+
+       pr_debug("[STA] BTCOEX_INFO MODE %d, internalTxRate : %x, nonErpInternalTxRate: %x\n",
+                priv->mode,
+                arg.internalTxRate,
+                arg.nonErpInternalTxRate);
+
+       ret = wsm_write_mib(priv, WSM_MIB_ID_OVERRIDE_INTERNAL_TX_RATE,
+                           &arg, sizeof(arg));
+
+       return ret;
+}
+
+void cw1200_bss_info_changed(struct ieee80211_hw *dev,
+                            struct ieee80211_vif *vif,
+                            struct ieee80211_bss_conf *info,
+                            u32 changed)
+{
+       struct cw1200_common *priv = dev->priv;
+       bool do_join = false;
+
+       mutex_lock(&priv->conf_mutex);
+
+       pr_debug("BSS CHANGED:  %08x\n", changed);
+
+       /* TODO: BSS_CHANGED_QOS */
+       /* TODO: BSS_CHANGED_TXPOWER */
+
+       if (changed & BSS_CHANGED_ARP_FILTER) {
+               struct wsm_mib_arp_ipv4_filter filter = {0};
+               int i;
+
+               pr_debug("[STA] BSS_CHANGED_ARP_FILTER cnt: %d\n",
+                        info->arp_addr_cnt);
+
+               /* Currently only one IP address is supported by firmware.
+                * If there are more IPs, ARP filtering will be disabled.
+                */
+               if (info->arp_addr_cnt > 0 &&
+                   info->arp_addr_cnt <= WSM_MAX_ARP_IP_ADDRTABLE_ENTRIES) {
+                       for (i = 0; i < info->arp_addr_cnt; i++) {
+                               filter.ipv4addrs[i] = info->arp_addr_list[i];
+                               pr_debug("[STA] addr[%d]: 0x%X\n",
+                                        i, filter.ipv4addrs[i]);
+                       }
+                       filter.enable = __cpu_to_le32(1);
+               }
+
+               pr_debug("[STA] arp ip filter enable: %d\n",
+                        __le32_to_cpu(filter.enable));
+
+               wsm_set_arp_ipv4_filter(priv, &filter);
+       }
+
+       if (changed &
+           (BSS_CHANGED_BEACON |
+            BSS_CHANGED_AP_PROBE_RESP |
+            BSS_CHANGED_BSSID |
+            BSS_CHANGED_SSID |
+            BSS_CHANGED_IBSS)) {
+               pr_debug("BSS_CHANGED_BEACON\n");
+               priv->beacon_int = info->beacon_int;
+               cw1200_update_beaconing(priv);
+               cw1200_upload_beacon(priv);
+       }
+
+       if (changed & BSS_CHANGED_BEACON_ENABLED) {
+               pr_debug("BSS_CHANGED_BEACON_ENABLED (%d)\n", info->enable_beacon);
+
+               if (priv->enable_beacon != info->enable_beacon) {
+                       cw1200_enable_beaconing(priv, info->enable_beacon);
+                       priv->enable_beacon = info->enable_beacon;
+               }
+       }
+
+       if (changed & BSS_CHANGED_BEACON_INT) {
+               pr_debug("CHANGED_BEACON_INT\n");
+               if (info->ibss_joined)
+                       do_join = true;
+               else if (priv->join_status == CW1200_JOIN_STATUS_AP)
+                       cw1200_update_beaconing(priv);
+       }
+
+       /* assoc/disassoc, or maybe AID changed */
+       if (changed & BSS_CHANGED_ASSOC) {
+               wsm_lock_tx(priv);
+               priv->wep_default_key_id = -1;
+               wsm_unlock_tx(priv);
+       }
+
+       if (changed & BSS_CHANGED_BSSID) {
+               pr_debug("BSS_CHANGED_BSSID\n");
+               do_join = true;
+       }
+
+       if (changed &
+           (BSS_CHANGED_ASSOC |
+            BSS_CHANGED_BSSID |
+            BSS_CHANGED_IBSS |
+            BSS_CHANGED_BASIC_RATES |
+            BSS_CHANGED_HT)) {
+               pr_debug("BSS_CHANGED_ASSOC\n");
+               if (info->assoc) {
+                       if (priv->join_status < CW1200_JOIN_STATUS_PRE_STA) {
+                               ieee80211_connection_loss(vif);
+                               mutex_unlock(&priv->conf_mutex);
+                               return;
+                       } else if (priv->join_status == CW1200_JOIN_STATUS_PRE_STA) {
+                               priv->join_status = CW1200_JOIN_STATUS_STA;
+                       }
+               } else {
+                       do_join = true;
+               }
+
+               if (info->assoc || info->ibss_joined) {
+                       struct ieee80211_sta *sta = NULL;
+                       __le32 htprot = 0;
+
+                       if (info->dtim_period)
+                               priv->join_dtim_period = info->dtim_period;
+                       priv->beacon_int = info->beacon_int;
+
+                       rcu_read_lock();
+
+                       if (info->bssid && !info->ibss_joined)
+                               sta = ieee80211_find_sta(vif, info->bssid);
+                       if (sta) {
+                               priv->ht_info.ht_cap = sta->ht_cap;
+                               priv->bss_params.operational_rate_set =
+                                       cw1200_rate_mask_to_wsm(priv,
+                                                               sta->supp_rates[priv->channel->band]);
+                               priv->ht_info.channel_type = cfg80211_get_chandef_type(&dev->conf.chandef);
+                               priv->ht_info.operation_mode = info->ht_operation_mode;
+                       } else {
+                               memset(&priv->ht_info, 0,
+                                      sizeof(priv->ht_info));
+                               priv->bss_params.operational_rate_set = -1;
+                       }
+                       rcu_read_unlock();
+
+                       /* Non Greenfield stations present */
+                       if (priv->ht_info.operation_mode &
+                           IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT)
+                               htprot |= cpu_to_le32(WSM_NON_GREENFIELD_STA_PRESENT);
+
+                       /* Set HT protection method */
+                       htprot |= cpu_to_le32((priv->ht_info.operation_mode & IEEE80211_HT_OP_MODE_PROTECTION) << 2);
+
+                       /* TODO:
+                        * STBC_param.dual_cts
+                        *  STBC_param.LSIG_TXOP_FILL
+                        */
+
+                       wsm_write_mib(priv, WSM_MIB_ID_SET_HT_PROTECTION,
+                                     &htprot, sizeof(htprot));
+
+                       priv->association_mode.greenfield =
+                               cw1200_ht_greenfield(&priv->ht_info);
+                       priv->association_mode.flags =
+                               WSM_ASSOCIATION_MODE_SNOOP_ASSOC_FRAMES |
+                               WSM_ASSOCIATION_MODE_USE_PREAMBLE_TYPE |
+                               WSM_ASSOCIATION_MODE_USE_HT_MODE |
+                               WSM_ASSOCIATION_MODE_USE_BASIC_RATE_SET |
+                               WSM_ASSOCIATION_MODE_USE_MPDU_START_SPACING;
+                       priv->association_mode.preamble =
+                               info->use_short_preamble ?
+                               WSM_JOIN_PREAMBLE_SHORT :
+                               WSM_JOIN_PREAMBLE_LONG;
+                       priv->association_mode.basic_rate_set = __cpu_to_le32(
+                               cw1200_rate_mask_to_wsm(priv,
+                                                       info->basic_rates));
+                       priv->association_mode.mpdu_start_spacing =
+                               cw1200_ht_ampdu_density(&priv->ht_info);
+
+                       cw1200_cqm_bssloss_sm(priv, 0, 0, 0);
+                       cancel_work_sync(&priv->unjoin_work);
+
+                       priv->bss_params.beacon_lost_count = priv->cqm_beacon_loss_count;
+                       priv->bss_params.aid = info->aid;
+
+                       if (priv->join_dtim_period < 1)
+                               priv->join_dtim_period = 1;
+
+                       pr_debug("[STA] DTIM %d, interval: %d\n",
+                                priv->join_dtim_period, priv->beacon_int);
+                       pr_debug("[STA] Preamble: %d, Greenfield: %d, Aid: %d, Rates: 0x%.8X, Basic: 0x%.8X\n",
+                                priv->association_mode.preamble,
+                                priv->association_mode.greenfield,
+                                priv->bss_params.aid,
+                                priv->bss_params.operational_rate_set,
+                                priv->association_mode.basic_rate_set);
+                       wsm_set_association_mode(priv, &priv->association_mode);
+
+                       if (!info->ibss_joined) {
+                               wsm_keep_alive_period(priv, 30 /* sec */);
+                               wsm_set_bss_params(priv, &priv->bss_params);
+                               priv->setbssparams_done = true;
+                               cw1200_set_beacon_wakeup_period_work(&priv->set_beacon_wakeup_period_work);
+                               cw1200_set_pm(priv, &priv->powersave_mode);
+                       }
+                       if (priv->vif->p2p) {
+                               pr_debug("[STA] Setting p2p powersave configuration.\n");
+                               wsm_set_p2p_ps_modeinfo(priv,
+                                                       &priv->p2p_ps_modeinfo);
+                       }
+                       if (priv->bt_present)
+                               cw1200_set_btcoexinfo(priv);
+               } else {
+                       memset(&priv->association_mode, 0,
+                              sizeof(priv->association_mode));
+                       memset(&priv->bss_params, 0, sizeof(priv->bss_params));
+               }
+       }
+
+       /* ERP Protection */
+       if (changed & (BSS_CHANGED_ASSOC |
+                      BSS_CHANGED_ERP_CTS_PROT |
+                      BSS_CHANGED_ERP_PREAMBLE)) {
+               u32 prev_erp_info = priv->erp_info;
+               if (info->use_cts_prot)
+                       priv->erp_info |= WLAN_ERP_USE_PROTECTION;
+               else if (!(prev_erp_info & WLAN_ERP_NON_ERP_PRESENT))
+                       priv->erp_info &= ~WLAN_ERP_USE_PROTECTION;
+
+               if (info->use_short_preamble)
+                       priv->erp_info |= WLAN_ERP_BARKER_PREAMBLE;
+               else
+                       priv->erp_info &= ~WLAN_ERP_BARKER_PREAMBLE;
+
+               pr_debug("[STA] ERP Protection: %x\n", priv->erp_info);
+
+               if (prev_erp_info != priv->erp_info)
+                       queue_work(priv->workqueue, &priv->set_cts_work);
+       }
+
+       /* ERP Slottime */
+       if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_ERP_SLOT)) {
+               __le32 slot_time = info->use_short_slot ?
+                       __cpu_to_le32(9) : __cpu_to_le32(20);
+               pr_debug("[STA] Slot time: %d us.\n",
+                        __le32_to_cpu(slot_time));
+               wsm_write_mib(priv, WSM_MIB_ID_DOT11_SLOT_TIME,
+                             &slot_time, sizeof(slot_time));
+       }
+
+       if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_CQM)) {
+               struct wsm_rcpi_rssi_threshold threshold = {
+                       .rollingAverageCount = 8,
+               };
+               pr_debug("[CQM] RSSI threshold subscribe: %d +- %d\n",
+                        info->cqm_rssi_thold, info->cqm_rssi_hyst);
+               priv->cqm_rssi_thold = info->cqm_rssi_thold;
+               priv->cqm_rssi_hyst = info->cqm_rssi_hyst;
+
+               if (info->cqm_rssi_thold || info->cqm_rssi_hyst) {
+                       /* RSSI subscription enabled */
+                       /* TODO: This is not the correct way of setting the
+                        * threshold; upper and lower should be set equal here
+                        * and adjusted in the callback. However, the current
+                        * implementation is much more reliable and stable.
+                        */
+
+                       /* RSSI: signed Q8.0, RCPI: unsigned Q7.1
+                        * RSSI = RCPI / 2 - 110
+                        */
+                       if (priv->cqm_use_rssi) {
+                               threshold.upperThreshold =
+                                       info->cqm_rssi_thold + info->cqm_rssi_hyst;
+                               threshold.lowerThreshold =
+                                       info->cqm_rssi_thold;
+                               threshold.rssiRcpiMode |= WSM_RCPI_RSSI_USE_RSSI;
+                       } else {
+                               threshold.upperThreshold = (info->cqm_rssi_thold + info->cqm_rssi_hyst + 110) * 2;
+                               threshold.lowerThreshold = (info->cqm_rssi_thold + 110) * 2;
+                       }
+                       threshold.rssiRcpiMode |= WSM_RCPI_RSSI_THRESHOLD_ENABLE;
+               } else {
+                       /* There is a bug in FW, see sta.c. We have to enable
+                        * dummy subscription to get correct RSSI values.
+                        */
+                       threshold.rssiRcpiMode |=
+                               WSM_RCPI_RSSI_THRESHOLD_ENABLE |
+                               WSM_RCPI_RSSI_DONT_USE_UPPER |
+                               WSM_RCPI_RSSI_DONT_USE_LOWER;
+                       if (priv->cqm_use_rssi)
+                               threshold.rssiRcpiMode |= WSM_RCPI_RSSI_USE_RSSI;
+               }
+               wsm_set_rcpi_rssi_threshold(priv, &threshold);
+       }
+       mutex_unlock(&priv->conf_mutex);
+
+       if (do_join) {
+               wsm_lock_tx(priv);
+               cw1200_do_join(priv); /* Will unlock it for us */
+       }
+}
+
+void cw1200_multicast_start_work(struct work_struct *work)
+{
+       struct cw1200_common *priv =
+               container_of(work, struct cw1200_common, multicast_start_work);
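+       /* Watchdog timeout: roughly one DTIM beacon period plus ~20 TU of
+        * slack, converted to jiffies.
+        */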
+       long tmo = priv->join_dtim_period *
+                       (priv->beacon_int + 20) * HZ / 1024;
+
+       cancel_work_sync(&priv->multicast_stop_work);
+
+       if (!priv->aid0_bit_set) {
+               wsm_lock_tx(priv);
+               cw1200_set_tim_impl(priv, true);
+               priv->aid0_bit_set = true;
+               mod_timer(&priv->mcast_timeout, jiffies + tmo);
+               wsm_unlock_tx(priv);
+       }
+}
+
+void cw1200_multicast_stop_work(struct work_struct *work)
+{
+       struct cw1200_common *priv =
+               container_of(work, struct cw1200_common, multicast_stop_work);
+
+       if (priv->aid0_bit_set) {
+               del_timer_sync(&priv->mcast_timeout);
+               wsm_lock_tx(priv);
+               priv->aid0_bit_set = false;
+               cw1200_set_tim_impl(priv, false);
+               wsm_unlock_tx(priv);
+       }
+}
+
+void cw1200_mcast_timeout(unsigned long arg)
+{
+       struct cw1200_common *priv =
+               (struct cw1200_common *)arg;
+
+       wiphy_warn(priv->hw->wiphy,
+                  "Multicast delivery timeout.\n");
+       spin_lock_bh(&priv->ps_state_lock);
+       priv->tx_multicast = priv->aid0_bit_set &&
+                       priv->buffered_multicasts;
+       if (priv->tx_multicast)
+               cw1200_bh_wakeup(priv);
+       spin_unlock_bh(&priv->ps_state_lock);
+}
+
+int cw1200_ampdu_action(struct ieee80211_hw *hw,
+                       struct ieee80211_vif *vif,
+                       enum ieee80211_ampdu_mlme_action action,
+                       struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+                       u8 buf_size)
+{
+       /* Aggregation is implemented fully in firmware,
+        * including block ack negotiation. Do not allow
+        * the mac80211 stack to do anything: it interferes
+        * with the firmware.
+        */
+
+       /* Note that we still need this function stubbed. */
+       return -ENOTSUPP;
+}
+
+/* ******************************************************************** */
+/* WSM callback                                                                */
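+/* Firmware tells us to suspend or resume frame delivery, either for
+ * buffered multicast traffic (around DTIM) or for a single link ID on
+ * station power-save transitions.
+ */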
+void cw1200_suspend_resume(struct cw1200_common *priv,
+                         struct wsm_suspend_resume *arg)
+{
+       pr_debug("[AP] %s: %s\n",
+                arg->stop ? "stop" : "start",
+                arg->multicast ? "broadcast" : "unicast");
+
+       if (arg->multicast) {
+               bool cancel_tmo = false;
+               spin_lock_bh(&priv->ps_state_lock);
+               if (arg->stop) {
+                       priv->tx_multicast = false;
+               } else {
+                       /* Firmware sends this indication every DTIM if a STA
+                        * in power save is connected. There is no reason to
+                        * suspend; the following wakeup would consume much
+                        * more power than could be saved.
+                        */
+                       cw1200_pm_stay_awake(&priv->pm_state,
+                                            priv->join_dtim_period *
+                                            (priv->beacon_int + 20) * HZ / 1024);
+                       priv->tx_multicast = (priv->aid0_bit_set &&
+                                             priv->buffered_multicasts);
+                       if (priv->tx_multicast) {
+                               cancel_tmo = true;
+                               cw1200_bh_wakeup(priv);
+                       }
+               }
+               spin_unlock_bh(&priv->ps_state_lock);
+               if (cancel_tmo)
+                       del_timer_sync(&priv->mcast_timeout);
+       } else {
+               spin_lock_bh(&priv->ps_state_lock);
+               cw1200_ps_notify(priv, arg->link_id, arg->stop);
+               spin_unlock_bh(&priv->ps_state_lock);
+               if (!arg->stop)
+                       cw1200_bh_wakeup(priv);
+       }
+       return;
+}
+
+/* ******************************************************************** */
+/* AP privates                                                         */
+
+static int cw1200_upload_beacon(struct cw1200_common *priv)
+{
+       int ret = 0;
+       struct ieee80211_mgmt *mgmt;
+       struct wsm_template_frame frame = {
+               .frame_type = WSM_FRAME_TYPE_BEACON,
+       };
+
+       u16 tim_offset;
+       u16 tim_len;
+
+       if (priv->mode == NL80211_IFTYPE_STATION ||
+           priv->mode == NL80211_IFTYPE_MONITOR ||
+           priv->mode == NL80211_IFTYPE_UNSPECIFIED)
+               goto done;
+
+       if (priv->vif->p2p)
+               frame.rate = WSM_TRANSMIT_RATE_6;
+
+       frame.skb = ieee80211_beacon_get_tim(priv->hw, priv->vif,
+                                            &tim_offset, &tim_len);
+       if (!frame.skb)
+               return -ENOMEM;
+
+       ret = wsm_set_template_frame(priv, &frame);
+
+       if (ret)
+               goto done;
+
+       /* TODO: Distill probe resp; remove TIM
+        * and any other beacon-specific IEs
+        */
+       mgmt = (void *)frame.skb->data;
+       mgmt->frame_control =
+               __cpu_to_le16(IEEE80211_FTYPE_MGMT |
+                             IEEE80211_STYPE_PROBE_RESP);
+
+       frame.frame_type = WSM_FRAME_TYPE_PROBE_RESPONSE;
+       if (priv->vif->p2p) {
+               ret = wsm_set_probe_responder(priv, true);
+       } else {
+               ret = wsm_set_template_frame(priv, &frame);
+               wsm_set_probe_responder(priv, false);
+       }
+
+done:
+       dev_kfree_skb(frame.skb);
+
+       return ret;
+}
+
+static int cw1200_upload_pspoll(struct cw1200_common *priv)
+{
+       int ret = 0;
+       struct wsm_template_frame frame = {
+               .frame_type = WSM_FRAME_TYPE_PS_POLL,
+               .rate = 0xFF,
+       };
+
+
+       frame.skb = ieee80211_pspoll_get(priv->hw, priv->vif);
+       if (!frame.skb)
+               return -ENOMEM;
+
+       ret = wsm_set_template_frame(priv, &frame);
+
+       dev_kfree_skb(frame.skb);
+
+       return ret;
+}
+
+static int cw1200_upload_null(struct cw1200_common *priv)
+{
+       int ret = 0;
+       struct wsm_template_frame frame = {
+               .frame_type = WSM_FRAME_TYPE_NULL,
+               .rate = 0xFF,
+       };
+
+       frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif);
+       if (!frame.skb)
+               return -ENOMEM;
+
+       ret = wsm_set_template_frame(priv, &frame);
+
+       dev_kfree_skb(frame.skb);
+
+       return ret;
+}
+
+static int cw1200_upload_qosnull(struct cw1200_common *priv)
+{
+       int ret = 0;
+       /* TODO:  This needs to be implemented
+
+       struct wsm_template_frame frame = {
+               .frame_type = WSM_FRAME_TYPE_QOS_NULL,
+               .rate = 0xFF,
+       };
+
+       frame.skb = ieee80211_qosnullfunc_get(priv->hw, priv->vif);
+       if (!frame.skb)
+               return -ENOMEM;
+
+       ret = wsm_set_template_frame(priv, &frame);
+
+       dev_kfree_skb(frame.skb);
+
+       */
+       return ret;
+}
+
+static int cw1200_enable_beaconing(struct cw1200_common *priv,
+                                  bool enable)
+{
+       struct wsm_beacon_transmit transmit = {
+               .enable_beaconing = enable,
+       };
+
+       return wsm_beacon_transmit(priv, &transmit);
+}
+
+static int cw1200_start_ap(struct cw1200_common *priv)
+{
+       int ret;
+       struct ieee80211_bss_conf *conf = &priv->vif->bss_conf;
+       struct wsm_start start = {
+               .mode = priv->vif->p2p ?
+                               WSM_START_MODE_P2P_GO : WSM_START_MODE_AP,
+               .band = (priv->channel->band == IEEE80211_BAND_5GHZ) ?
+                               WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G,
+               .channel_number = priv->channel->hw_value,
+               .beacon_interval = conf->beacon_int,
+               .dtim_period = conf->dtim_period,
+               .preamble = conf->use_short_preamble ?
+                               WSM_JOIN_PREAMBLE_SHORT :
+                               WSM_JOIN_PREAMBLE_LONG,
+               .probe_delay = 100,
+               .basic_rate_set = cw1200_rate_mask_to_wsm(priv,
+                               conf->basic_rates),
+       };
+       struct wsm_operational_mode mode = {
+               .power_mode = cw1200_power_mode,
+               .disable_more_flag_usage = true,
+       };
+
+       memset(start.ssid, 0, sizeof(start.ssid));
+       if (!conf->hidden_ssid) {
+               start.ssid_len = conf->ssid_len;
+               memcpy(start.ssid, conf->ssid, start.ssid_len);
+       }
+
+       priv->beacon_int = conf->beacon_int;
+       priv->join_dtim_period = conf->dtim_period;
+
+       memset(&priv->link_id_db, 0, sizeof(priv->link_id_db));
+
+       pr_debug("[AP] ch: %d(%d), bcn: %d(%d), brt: 0x%.8X, ssid: %.*s.\n",
+                start.channel_number, start.band,
+                start.beacon_interval, start.dtim_period,
+                start.basic_rate_set,
+                start.ssid_len, start.ssid);
+       ret = wsm_start(priv, &start);
+       if (!ret)
+               ret = cw1200_upload_keys(priv);
+       if (!ret && priv->vif->p2p) {
+               pr_debug("[AP] Setting p2p powersave configuration.\n");
+               wsm_set_p2p_ps_modeinfo(priv, &priv->p2p_ps_modeinfo);
+       }
+       if (!ret) {
+               wsm_set_block_ack_policy(priv, 0, 0);
+               priv->join_status = CW1200_JOIN_STATUS_AP;
+               cw1200_update_filtering(priv);
+       }
+       wsm_set_operational_mode(priv, &mode);
+       return ret;
+}
+
+static int cw1200_update_beaconing(struct cw1200_common *priv)
+{
+       struct ieee80211_bss_conf *conf = &priv->vif->bss_conf;
+       struct wsm_reset reset = {
+               .link_id = 0,
+               .reset_statistics = true,
+       };
+
+       if (priv->mode == NL80211_IFTYPE_AP) {
+               /* TODO: check if changed channel, band */
+               if (priv->join_status != CW1200_JOIN_STATUS_AP ||
+                   priv->beacon_int != conf->beacon_int) {
+                       pr_debug("ap restarting\n");
+                       wsm_lock_tx(priv);
+                       if (priv->join_status != CW1200_JOIN_STATUS_PASSIVE)
+                               wsm_reset(priv, &reset);
+                       priv->join_status = CW1200_JOIN_STATUS_PASSIVE;
+                       cw1200_start_ap(priv);
+                       wsm_unlock_tx(priv);
+               } else
+                       pr_debug("ap started join_status: %d\n",
+                                priv->join_status);
+       }
+       return 0;
+}
diff --git a/drivers/net/wireless/cw1200/sta.h b/drivers/net/wireless/cw1200/sta.h
new file mode 100644 (file)
index 0000000..35babb6
--- /dev/null
@@ -0,0 +1,123 @@
+/*
+ * Mac80211 STA interface for ST-Ericsson CW1200 mac80211 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef STA_H_INCLUDED
+#define STA_H_INCLUDED
+
+/* ******************************************************************** */
+/* mac80211 API                                                                */
+
+int cw1200_start(struct ieee80211_hw *dev);
+void cw1200_stop(struct ieee80211_hw *dev);
+int cw1200_add_interface(struct ieee80211_hw *dev,
+                        struct ieee80211_vif *vif);
+void cw1200_remove_interface(struct ieee80211_hw *dev,
+                            struct ieee80211_vif *vif);
+int cw1200_change_interface(struct ieee80211_hw *dev,
+                           struct ieee80211_vif *vif,
+                           enum nl80211_iftype new_type,
+                           bool p2p);
+int cw1200_config(struct ieee80211_hw *dev, u32 changed);
+void cw1200_configure_filter(struct ieee80211_hw *dev,
+                            unsigned int changed_flags,
+                            unsigned int *total_flags,
+                            u64 multicast);
+int cw1200_conf_tx(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
+                  u16 queue, const struct ieee80211_tx_queue_params *params);
+int cw1200_get_stats(struct ieee80211_hw *dev,
+                    struct ieee80211_low_level_stats *stats);
+int cw1200_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
+                  struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+                  struct ieee80211_key_conf *key);
+
+int cw1200_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
+
+void cw1200_flush(struct ieee80211_hw *hw, u32 queues, bool drop);
+
+u64 cw1200_prepare_multicast(struct ieee80211_hw *hw,
+                            struct netdev_hw_addr_list *mc_list);
+
+int cw1200_set_pm(struct cw1200_common *priv, const struct wsm_set_pm *arg);
+
+/* ******************************************************************** */
+/* WSM callbacks                                                       */
+
+void cw1200_join_complete_cb(struct cw1200_common *priv,
+                               struct wsm_join_complete *arg);
+
+/* ******************************************************************** */
+/* WSM events                                                          */
+
+void cw1200_free_event_queue(struct cw1200_common *priv);
+void cw1200_event_handler(struct work_struct *work);
+void cw1200_bss_loss_work(struct work_struct *work);
+void cw1200_bss_params_work(struct work_struct *work);
+void cw1200_keep_alive_work(struct work_struct *work);
+void cw1200_tx_failure_work(struct work_struct *work);
+
+void __cw1200_cqm_bssloss_sm(struct cw1200_common *priv, int init, int good,
+                            int bad);
+static inline void cw1200_cqm_bssloss_sm(struct cw1200_common *priv,
+                                        int init, int good, int bad)
+{
+       spin_lock(&priv->bss_loss_lock);
+       __cw1200_cqm_bssloss_sm(priv, init, good, bad);
+       spin_unlock(&priv->bss_loss_lock);
+}
+
+/* ******************************************************************** */
+/* Internal API                                                                */
+
+int cw1200_setup_mac(struct cw1200_common *priv);
+void cw1200_join_timeout(struct work_struct *work);
+void cw1200_unjoin_work(struct work_struct *work);
+void cw1200_join_complete_work(struct work_struct *work);
+void cw1200_wep_key_work(struct work_struct *work);
+void cw1200_update_listening(struct cw1200_common *priv, bool enabled);
+void cw1200_update_filtering(struct cw1200_common *priv);
+void cw1200_update_filtering_work(struct work_struct *work);
+void cw1200_set_beacon_wakeup_period_work(struct work_struct *work);
+int cw1200_enable_listening(struct cw1200_common *priv);
+int cw1200_disable_listening(struct cw1200_common *priv);
+int cw1200_set_uapsd_param(struct cw1200_common *priv,
+                               const struct wsm_edca_params *arg);
+void cw1200_ba_work(struct work_struct *work);
+void cw1200_ba_timer(unsigned long arg);
+
+/* AP stuff */
+int cw1200_set_tim(struct ieee80211_hw *dev, struct ieee80211_sta *sta,
+                  bool set);
+int cw1200_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                  struct ieee80211_sta *sta);
+int cw1200_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                     struct ieee80211_sta *sta);
+void cw1200_sta_notify(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
+                      enum sta_notify_cmd notify_cmd,
+                      struct ieee80211_sta *sta);
+void cw1200_bss_info_changed(struct ieee80211_hw *dev,
+                            struct ieee80211_vif *vif,
+                            struct ieee80211_bss_conf *info,
+                            u32 changed);
+int cw1200_ampdu_action(struct ieee80211_hw *hw,
+                       struct ieee80211_vif *vif,
+                       enum ieee80211_ampdu_mlme_action action,
+                       struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+                       u8 buf_size);
+
+void cw1200_suspend_resume(struct cw1200_common *priv,
+                         struct wsm_suspend_resume *arg);
+void cw1200_set_tim_work(struct work_struct *work);
+void cw1200_set_cts_work(struct work_struct *work);
+void cw1200_multicast_start_work(struct work_struct *work);
+void cw1200_multicast_stop_work(struct work_struct *work);
+void cw1200_mcast_timeout(unsigned long arg);
+
+#endif
diff --git a/drivers/net/wireless/cw1200/txrx.c b/drivers/net/wireless/cw1200/txrx.c
new file mode 100644 (file)
index 0000000..5862c37
--- /dev/null
@@ -0,0 +1,1473 @@
+/*
+ * Datapath implementation for ST-Ericsson CW1200 mac80211 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include "cw1200.h"
+#include "wsm.h"
+#include "bh.h"
+#include "sta.h"
+#include "debug.h"
+
+#define CW1200_INVALID_RATE_ID (0xFF)
+
+static int cw1200_handle_action_rx(struct cw1200_common *priv,
+                                  struct sk_buff *skb);
+static const struct ieee80211_rate *
+cw1200_get_tx_rate(const struct cw1200_common *priv,
+                  const struct ieee80211_tx_rate *rate);
+
+/* ******************************************************************** */
+/* TX queue lock / unlock                                              */
+
+static inline void cw1200_tx_queues_lock(struct cw1200_common *priv)
+{
+       int i;
+       for (i = 0; i < 4; ++i)
+               cw1200_queue_lock(&priv->tx_queue[i]);
+}
+
+static inline void cw1200_tx_queues_unlock(struct cw1200_common *priv)
+{
+       int i;
+       for (i = 0; i < 4; ++i)
+               cw1200_queue_unlock(&priv->tx_queue[i]);
+}
+
+/* ******************************************************************** */
+/* TX policy cache implementation                                      */
+
+static void tx_policy_dump(struct tx_policy *policy)
+{
+       pr_debug("[TX policy] %.1X%.1X%.1X%.1X%.1X%.1X%.1X%.1X %.1X%.1X%.1X%.1X%.1X%.1X%.1X%.1X %.1X%.1X%.1X%.1X%.1X%.1X%.1X%.1X: %d\n",
+                policy->raw[0] & 0x0F,  policy->raw[0] >> 4,
+                policy->raw[1] & 0x0F,  policy->raw[1] >> 4,
+                policy->raw[2] & 0x0F,  policy->raw[2] >> 4,
+                policy->raw[3] & 0x0F,  policy->raw[3] >> 4,
+                policy->raw[4] & 0x0F,  policy->raw[4] >> 4,
+                policy->raw[5] & 0x0F,  policy->raw[5] >> 4,
+                policy->raw[6] & 0x0F,  policy->raw[6] >> 4,
+                policy->raw[7] & 0x0F,  policy->raw[7] >> 4,
+                policy->raw[8] & 0x0F,  policy->raw[8] >> 4,
+                policy->raw[9] & 0x0F,  policy->raw[9] >> 4,
+                policy->raw[10] & 0x0F,  policy->raw[10] >> 4,
+                policy->raw[11] & 0x0F,  policy->raw[11] >> 4,
+                policy->defined);
+}
+
+static void tx_policy_build(const struct cw1200_common *priv,
+       /* [out] */ struct tx_policy *policy,
+       struct ieee80211_tx_rate *rates, size_t count)
+{
+       int i, j;
+       unsigned limit = priv->short_frame_max_tx_count;
+       unsigned total = 0;
+       BUG_ON(rates[0].idx < 0);
+       memset(policy, 0, sizeof(*policy));
+
+       /* Sort rates in descending order. */
+       for (i = 1; i < count; ++i) {
+               if (rates[i].idx < 0) {
+                       count = i;
+                       break;
+               }
+               if (rates[i].idx > rates[i - 1].idx) {
+                       struct ieee80211_tx_rate tmp = rates[i - 1];
+                       rates[i - 1] = rates[i];
+                       rates[i] = tmp;
+               }
+       }
+
+       /* Eliminate duplicates. */
+       total = rates[0].count;
+       for (i = 0, j = 1; j < count; ++j) {
+               if (rates[j].idx == rates[i].idx) {
+                       rates[i].count += rates[j].count;
+               } else if (rates[j].idx > rates[i].idx) {
+                       break;
+               } else {
+                       ++i;
+                       if (i != j)
+                               rates[i] = rates[j];
+               }
+               total += rates[j].count;
+       }
+       count = i + 1;
+
+       /* Re-fill the policy, trying to keep every requested rate while
+        * respecting the global max tx retransmission count.
+        */
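+       /* Example: with limit = 7 and counts {4, 4, 4} on three rates,
+        * the clipping below yields {4, 2, 1}: each remaining rate keeps
+        * at least one transmission attempt within the global limit.
+        */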
+       if (limit < count)
+               limit = count;
+       if (total > limit) {
+               for (i = 0; i < count; ++i) {
+                       int left = count - i - 1;
+                       if (rates[i].count > limit - left)
+                               rates[i].count = limit - left;
+                       limit -= rates[i].count;
+               }
+       }
+
+       /* HACK!!! The device has problems (at least) switching from
+        * 54Mbps CTS to 1Mbps. This switch takes an enormous amount
+        * of time (100-200 ms), leading to a considerable throughput drop.
+        * As a workaround, additional g-rates are injected into the
+        * policy.
+        */
+       if (count == 2 && !(rates[0].flags & IEEE80211_TX_RC_MCS) &&
+           rates[0].idx > 4 && rates[0].count > 2 &&
+           rates[1].idx < 2) {
+               int mid_rate = (rates[0].idx + 4) >> 1;
+
+               /* Decrease number of retries for the initial rate */
+               rates[0].count -= 2;
+
+               if (mid_rate != 4) {
+                       /* Keep fallback rate at 1Mbps. */
+                       rates[3] = rates[1];
+
+                       /* Inject 1 transmission on lowest g-rate */
+                       rates[2].idx = 4;
+                       rates[2].count = 1;
+                       rates[2].flags = rates[1].flags;
+
+                       /* Inject 1 transmission on mid-rate */
+                       rates[1].idx = mid_rate;
+                       rates[1].count = 1;
+
+                       /* Falling back to 1 Mbps is a really bad thing,
+                        * so let's try to increase the probability of a
+                        * successful transmission on the lowest g-rate
+                        * even more.
+                        */
+                       if (rates[0].count >= 3) {
+                               --rates[0].count;
+                               ++rates[2].count;
+                       }
+
+                       /* Adjust amount of rates defined */
+                       count += 2;
+               } else {
+                       /* Keep fallback rate at 1Mbps. */
+                       rates[2] = rates[1];
+
+                       /* Inject 2 transmissions on lowest g-rate */
+                       rates[1].idx = 4;
+                       rates[1].count = 2;
+
+                       /* Adjust amount of rates defined */
+                       count += 1;
+               }
+       }
+
+       policy->defined = cw1200_get_tx_rate(priv, &rates[0])->hw_value + 1;
+
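+       /* The policy table packs a 4-bit retry count per hw rate index:
+        * raw[12] holds 24 nibbles, indexed by rateid below.
+        */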
+       for (i = 0; i < count; ++i) {
+               register unsigned rateid, off, shift, retries;
+
+               rateid = cw1200_get_tx_rate(priv, &rates[i])->hw_value;
+               off = rateid >> 3;              /* eq. rateid / 8 */
+               shift = (rateid & 0x07) << 2;   /* eq. (rateid % 8) * 4 */
+
+               retries = rates[i].count;
+               if (retries > 0x0F) {
+                       rates[i].count = 0x0f;
+                       retries = 0x0F;
+               }
+               policy->tbl[off] |= __cpu_to_le32(retries << shift);
+               policy->retry_count += retries;
+       }
+
+       pr_debug("[TX policy] Policy (%zu): %d:%d, %d:%d, %d:%d, %d:%d\n",
+                count,
+                rates[0].idx, rates[0].count,
+                rates[1].idx, rates[1].count,
+                rates[2].idx, rates[2].count,
+                rates[3].idx, rates[3].count);
+}
+
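+/* Compare policies at nibble granularity: 'defined' counts 4-bit rate
+ * entries, so whole bytes are compared first and a trailing half-byte,
+ * if any, is compared separately.
+ */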
+static inline bool tx_policy_is_equal(const struct tx_policy *wanted,
+                                       const struct tx_policy *cached)
+{
+       size_t count = wanted->defined >> 1;
+       if (wanted->defined > cached->defined)
+               return false;
+       if (count) {
+               if (memcmp(wanted->raw, cached->raw, count))
+                       return false;
+       }
+       if (wanted->defined & 1) {
+               if ((wanted->raw[count] & 0x0F) != (cached->raw[count] & 0x0F))
+                       return false;
+       }
+       return true;
+}
+
+static int tx_policy_find(struct tx_policy_cache *cache,
+                               const struct tx_policy *wanted)
+{
+       /* O(n) complexity. Not so good, but there are only 8 entries in
+        * the cache.
+        * Also, LRU ordering helps to reduce the search time.
+        */
+       struct tx_policy_cache_entry *it;
+       /* First search for policy in "used" list */
+       list_for_each_entry(it, &cache->used, link) {
+               if (tx_policy_is_equal(wanted, &it->policy))
+                       return it - cache->cache;
+       }
+       /* Then - in "free list" */
+       list_for_each_entry(it, &cache->free, link) {
+               if (tx_policy_is_equal(wanted, &it->policy))
+                       return it - cache->cache;
+       }
+       return -1;
+}
+
+static inline void tx_policy_use(struct tx_policy_cache *cache,
+                                struct tx_policy_cache_entry *entry)
+{
+       ++entry->policy.usage_count;
+       list_move(&entry->link, &cache->used);
+}
+
+static inline int tx_policy_release(struct tx_policy_cache *cache,
+                                   struct tx_policy_cache_entry *entry)
+{
+       int ret = --entry->policy.usage_count;
+       if (!ret)
+               list_move(&entry->link, &cache->free);
+       return ret;
+}
+
+void tx_policy_clean(struct cw1200_common *priv)
+{
+       int idx, locked;
+       struct tx_policy_cache *cache = &priv->tx_policy_cache;
+       struct tx_policy_cache_entry *entry;
+
+       cw1200_tx_queues_lock(priv);
+       spin_lock_bh(&cache->lock);
+       locked = list_empty(&cache->free);
+
+       for (idx = 0; idx < TX_POLICY_CACHE_SIZE; idx++) {
+               entry = &cache->cache[idx];
+               /* Policy usage count should be 0 at this time as all queues
+                * should be empty.
+                */
+               if (WARN_ON(entry->policy.usage_count)) {
+                       entry->policy.usage_count = 0;
+                       list_move(&entry->link, &cache->free);
+               }
+               memset(&entry->policy, 0, sizeof(entry->policy));
+       }
+       if (locked)
+               cw1200_tx_queues_unlock(priv);
+
+       cw1200_tx_queues_unlock(priv);
+       spin_unlock_bh(&cache->lock);
+}
+
+/* ******************************************************************** */
+/* External TX policy cache API                                                */
+
+void tx_policy_init(struct cw1200_common *priv)
+{
+       struct tx_policy_cache *cache = &priv->tx_policy_cache;
+       int i;
+
+       memset(cache, 0, sizeof(*cache));
+
+       spin_lock_init(&cache->lock);
+       INIT_LIST_HEAD(&cache->used);
+       INIT_LIST_HEAD(&cache->free);
+
+       for (i = 0; i < TX_POLICY_CACHE_SIZE; ++i)
+               list_add(&cache->cache[i].link, &cache->free);
+}
+
+static int tx_policy_get(struct cw1200_common *priv,
+                 struct ieee80211_tx_rate *rates,
+                 size_t count, bool *renew)
+{
+       int idx;
+       struct tx_policy_cache *cache = &priv->tx_policy_cache;
+       struct tx_policy wanted;
+
+       tx_policy_build(priv, &wanted, rates, count);
+
+       spin_lock_bh(&cache->lock);
+       if (WARN_ON_ONCE(list_empty(&cache->free))) {
+               spin_unlock_bh(&cache->lock);
+               return CW1200_INVALID_RATE_ID;
+       }
+       idx = tx_policy_find(cache, &wanted);
+       if (idx >= 0) {
+               pr_debug("[TX policy] Used TX policy: %d\n", idx);
+               *renew = false;
+       } else {
+               struct tx_policy_cache_entry *entry;
+               *renew = true;
+               /* If the policy is not found, create a new one
+                * using the oldest entry in the "free" list.
+                */
+               entry = list_entry(cache->free.prev,
+                       struct tx_policy_cache_entry, link);
+               entry->policy = wanted;
+               idx = entry - cache->cache;
+               pr_debug("[TX policy] New TX policy: %d\n", idx);
+               tx_policy_dump(&entry->policy);
+       }
+       tx_policy_use(cache, &cache->cache[idx]);
+       if (list_empty(&cache->free)) {
+               /* Lock TX queues. */
+               cw1200_tx_queues_lock(priv);
+       }
+       spin_unlock_bh(&cache->lock);
+       return idx;
+}
+
+static void tx_policy_put(struct cw1200_common *priv, int idx)
+{
+       int usage, locked;
+       struct tx_policy_cache *cache = &priv->tx_policy_cache;
+
+       spin_lock_bh(&cache->lock);
+       locked = list_empty(&cache->free);
+       usage = tx_policy_release(cache, &cache->cache[idx]);
+       if (locked && !usage) {
+               /* Unlock TX queues. */
+               cw1200_tx_queues_unlock(priv);
+       }
+       spin_unlock_bh(&cache->lock);
+}
+
+static int tx_policy_upload(struct cw1200_common *priv)
+{
+       struct tx_policy_cache *cache = &priv->tx_policy_cache;
+       int i;
+       struct wsm_set_tx_rate_retry_policy arg = {
+               .num = 0,
+       };
+       spin_lock_bh(&cache->lock);
+
+       /* Upload only modified entries. */
+       for (i = 0; i < TX_POLICY_CACHE_SIZE; ++i) {
+               struct tx_policy *src = &cache->cache[i].policy;
+               if (src->retry_count && !src->uploaded) {
+                       struct wsm_tx_rate_retry_policy *dst =
+                               &arg.tbl[arg.num];
+                       dst->index = i;
+                       dst->short_retries = priv->short_frame_max_tx_count;
+                       dst->long_retries = priv->long_frame_max_tx_count;
+
+                       dst->flags = WSM_TX_RATE_POLICY_FLAG_TERMINATE_WHEN_FINISHED |
+                               WSM_TX_RATE_POLICY_FLAG_COUNT_INITIAL_TRANSMIT;
+                       memcpy(dst->rate_count_indices, src->tbl,
+                              sizeof(dst->rate_count_indices));
+                       src->uploaded = 1;
+                       ++arg.num;
+               }
+       }
+       spin_unlock_bh(&cache->lock);
+       cw1200_debug_tx_cache_miss(priv);
+       pr_debug("[TX policy] Upload %d policies\n", arg.num);
+       return wsm_set_tx_rate_retry_policy(priv, &arg);
+}
+
+void tx_policy_upload_work(struct work_struct *work)
+{
+       struct cw1200_common *priv =
+               container_of(work, struct cw1200_common, tx_policy_upload_work);
+
+       pr_debug("[TX] TX policy upload.\n");
+       tx_policy_upload(priv);
+
+       wsm_unlock_tx(priv);
+       cw1200_tx_queues_unlock(priv);
+}
+
+/* ******************************************************************** */
+/* cw1200 TX implementation                                            */
+
+struct cw1200_txinfo {
+       struct sk_buff *skb;
+       unsigned queue;
+       struct ieee80211_tx_info *tx_info;
+       const struct ieee80211_rate *rate;
+       struct ieee80211_hdr *hdr;
+       size_t hdrlen;
+       const u8 *da;
+       struct cw1200_sta_priv *sta_priv;
+       struct ieee80211_sta *sta;
+       struct cw1200_txpriv txpriv;
+};
+
+u32 cw1200_rate_mask_to_wsm(struct cw1200_common *priv, u32 rates)
+{
+       u32 ret = 0;
+       int i;
+       for (i = 0; i < 32; ++i) {
+               if (rates & BIT(i))
+                       ret |= BIT(priv->rates[i].hw_value);
+       }
+       return ret;
+}
+
+static const struct ieee80211_rate *
+cw1200_get_tx_rate(const struct cw1200_common *priv,
+                  const struct ieee80211_tx_rate *rate)
+{
+       if (rate->idx < 0)
+               return NULL;
+       if (rate->flags & IEEE80211_TX_RC_MCS)
+               return &priv->mcs_rates[rate->idx];
+       return &priv->hw->wiphy->bands[priv->channel->band]->
+               bitrates[rate->idx];
+}
+
+static int
+cw1200_tx_h_calc_link_ids(struct cw1200_common *priv,
+                         struct cw1200_txinfo *t)
+{
+       if (t->sta && t->sta_priv->link_id)
+               t->txpriv.raw_link_id =
+                               t->txpriv.link_id =
+                               t->sta_priv->link_id;
+       else if (priv->mode != NL80211_IFTYPE_AP)
+               t->txpriv.raw_link_id =
+                               t->txpriv.link_id = 0;
+       else if (is_multicast_ether_addr(t->da)) {
+               if (priv->enable_beacon) {
+                       t->txpriv.raw_link_id = 0;
+                       t->txpriv.link_id = CW1200_LINK_ID_AFTER_DTIM;
+               } else {
+                       t->txpriv.raw_link_id = 0;
+                       t->txpriv.link_id = 0;
+               }
+       } else {
+               t->txpriv.link_id = cw1200_find_link_id(priv, t->da);
+               if (!t->txpriv.link_id)
+                       t->txpriv.link_id = cw1200_alloc_link_id(priv, t->da);
+               if (!t->txpriv.link_id) {
+                       wiphy_err(priv->hw->wiphy,
+                                 "No more link IDs available.\n");
+                       return -ENOENT;
+               }
+               t->txpriv.raw_link_id = t->txpriv.link_id;
+       }
+       if (t->txpriv.raw_link_id)
+               priv->link_id_db[t->txpriv.raw_link_id - 1].timestamp =
+                               jiffies;
+       if (t->sta && (t->sta->uapsd_queues & BIT(t->queue)))
+               t->txpriv.link_id = CW1200_LINK_ID_UAPSD;
+       return 0;
+}
+
+static void
+cw1200_tx_h_pm(struct cw1200_common *priv,
+              struct cw1200_txinfo *t)
+{
+       if (ieee80211_is_auth(t->hdr->frame_control)) {
+               u32 mask = ~BIT(t->txpriv.raw_link_id);
+               spin_lock_bh(&priv->ps_state_lock);
+               priv->sta_asleep_mask &= mask;
+               priv->pspoll_mask &= mask;
+               spin_unlock_bh(&priv->ps_state_lock);
+       }
+}
+
+static void
+cw1200_tx_h_calc_tid(struct cw1200_common *priv,
+                    struct cw1200_txinfo *t)
+{
+       if (ieee80211_is_data_qos(t->hdr->frame_control)) {
+               u8 *qos = ieee80211_get_qos_ctl(t->hdr);
+               t->txpriv.tid = qos[0] & IEEE80211_QOS_CTL_TID_MASK;
+       } else if (ieee80211_is_data(t->hdr->frame_control)) {
+               t->txpriv.tid = 0;
+       }
+}
+
+static int
+cw1200_tx_h_crypt(struct cw1200_common *priv,
+                 struct cw1200_txinfo *t)
+{
+       if (!t->tx_info->control.hw_key ||
+           !ieee80211_has_protected(t->hdr->frame_control))
+               return 0;
+
+       t->hdrlen += t->tx_info->control.hw_key->iv_len;
+       skb_put(t->skb, t->tx_info->control.hw_key->icv_len);
+
+       if (t->tx_info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
+               skb_put(t->skb, 8); /* MIC space */
+
+       return 0;
+}
+
+static int
+cw1200_tx_h_align(struct cw1200_common *priv,
+                 struct cw1200_txinfo *t,
+                 u8 *flags)
+{
+       size_t offset = (size_t)t->skb->data & 3;
+
+       if (!offset)
+               return 0;
+
+       if (offset & 1) {
+               wiphy_err(priv->hw->wiphy,
+                         "Bug: attempt to transmit a frame with wrong alignment: %zu\n",
+                         offset);
+               return -EINVAL;
+       }
+
+       if (skb_headroom(t->skb) < offset) {
+               wiphy_err(priv->hw->wiphy,
+                         "Bug: no space allocated for DMA alignment. headroom: %d\n",
+                         skb_headroom(t->skb));
+               return -ENOMEM;
+       }
+       skb_push(t->skb, offset);
+       t->hdrlen += offset;
+       t->txpriv.offset += offset;
+       *flags |= WSM_TX_2BYTES_SHIFT;
+       cw1200_debug_tx_align(priv);
+       return 0;
+}
+
+static int
+cw1200_tx_h_action(struct cw1200_common *priv,
+                  struct cw1200_txinfo *t)
+{
+       struct ieee80211_mgmt *mgmt =
+               (struct ieee80211_mgmt *)t->hdr;
+       if (ieee80211_is_action(t->hdr->frame_control) &&
+           mgmt->u.action.category == WLAN_CATEGORY_BACK)
+               return 1;
+       else
+               return 0;
+}
+
+/* Add WSM header */
+static struct wsm_tx *
+cw1200_tx_h_wsm(struct cw1200_common *priv,
+               struct cw1200_txinfo *t)
+{
+       struct wsm_tx *wsm;
+
+       if (skb_headroom(t->skb) < sizeof(struct wsm_tx)) {
+               wiphy_err(priv->hw->wiphy,
+                         "Bug: no space allocated for WSM header. headroom: %d\n",
+                         skb_headroom(t->skb));
+               return NULL;
+       }
+
+       wsm = (struct wsm_tx *)skb_push(t->skb, sizeof(struct wsm_tx));
+       t->txpriv.offset += sizeof(struct wsm_tx);
+       memset(wsm, 0, sizeof(*wsm));
+       wsm->hdr.len = __cpu_to_le16(t->skb->len);
+       wsm->hdr.id = __cpu_to_le16(0x0004);
+       wsm->queue_id = wsm_queue_id_to_wsm(t->queue);
+       return wsm;
+}
+
+/* BT Coex specific handling */
+static void
+cw1200_tx_h_bt(struct cw1200_common *priv,
+              struct cw1200_txinfo *t,
+              struct wsm_tx *wsm)
+{
+       u8 priority = 0;
+
+       if (!priv->bt_present)
+               return;
+
+       if (ieee80211_is_nullfunc(t->hdr->frame_control)) {
+               priority = WSM_EPTA_PRIORITY_MGT;
+       } else if (ieee80211_is_data(t->hdr->frame_control)) {
+               /* Skip LLC SNAP header (+6) */
+               u8 *payload = &t->skb->data[t->hdrlen];
+               __be16 *ethertype = (__be16 *)&payload[6];
+               if (be16_to_cpu(*ethertype) == ETH_P_PAE)
+                       priority = WSM_EPTA_PRIORITY_EAPOL;
+       } else if (ieee80211_is_assoc_req(t->hdr->frame_control) ||
+               ieee80211_is_reassoc_req(t->hdr->frame_control)) {
+               struct ieee80211_mgmt *mgt_frame =
+                               (struct ieee80211_mgmt *)t->hdr;
+
+               if (le16_to_cpu(mgt_frame->u.assoc_req.listen_interval) <
+                                               priv->listen_interval) {
+                       pr_debug("Modified Listen Interval to %d from %d\n",
+                                priv->listen_interval,
+                                mgt_frame->u.assoc_req.listen_interval);
+                       /* Replace the listen interval with the one
+                        * read from the SDD.
+                        */
+                       mgt_frame->u.assoc_req.listen_interval = cpu_to_le16(priv->listen_interval);
+               }
+       }
+
+       if (!priority) {
+               if (ieee80211_is_action(t->hdr->frame_control))
+                       priority = WSM_EPTA_PRIORITY_ACTION;
+               else if (ieee80211_is_mgmt(t->hdr->frame_control))
+                       priority = WSM_EPTA_PRIORITY_MGT;
+               else if (wsm->queue_id == WSM_QUEUE_VOICE)
+                       priority = WSM_EPTA_PRIORITY_VOICE;
+               else if (wsm->queue_id == WSM_QUEUE_VIDEO)
+                       priority = WSM_EPTA_PRIORITY_VIDEO;
+               else
+                       priority = WSM_EPTA_PRIORITY_DATA;
+       }
+
+       pr_debug("[TX] EPTA priority %d.\n", priority);
+
+       wsm->flags |= priority << 1;
+}
+
+static int
+cw1200_tx_h_rate_policy(struct cw1200_common *priv,
+                       struct cw1200_txinfo *t,
+                       struct wsm_tx *wsm)
+{
+       bool tx_policy_renew = false;
+
+       t->txpriv.rate_id = tx_policy_get(priv,
+               t->tx_info->control.rates, IEEE80211_TX_MAX_RATES,
+               &tx_policy_renew);
+       if (t->txpriv.rate_id == CW1200_INVALID_RATE_ID)
+               return -EFAULT;
+
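+       /* The selected rate policy index is carried in bits 7:4 of the
+        * WSM TX flags.
+        */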
+       wsm->flags |= t->txpriv.rate_id << 4;
+
+       t->rate = cw1200_get_tx_rate(priv,
+               &t->tx_info->control.rates[0]);
+       wsm->max_tx_rate = t->rate->hw_value;
+       if (t->rate->flags & IEEE80211_TX_RC_MCS) {
+               if (cw1200_ht_greenfield(&priv->ht_info))
+                       wsm->ht_tx_parameters |=
+                               __cpu_to_le32(WSM_HT_TX_GREENFIELD);
+               else
+                       wsm->ht_tx_parameters |=
+                               __cpu_to_le32(WSM_HT_TX_MIXED);
+       }
+
+       if (tx_policy_renew) {
+               pr_debug("[TX] TX policy renew.\n");
+               /* It's not so optimal to stop TX queues every now and then.
+                * Better to reimplement task scheduling with
+                * a counter. TODO.
+                */
+               wsm_lock_tx_async(priv);
+               cw1200_tx_queues_lock(priv);
+               if (queue_work(priv->workqueue,
+                              &priv->tx_policy_upload_work) <= 0) {
+                       cw1200_tx_queues_unlock(priv);
+                       wsm_unlock_tx(priv);
+               }
+       }
+       return 0;
+}
+
+static bool
+cw1200_tx_h_pm_state(struct cw1200_common *priv,
+                    struct cw1200_txinfo *t)
+{
+       int was_buffered = 1;
+
+       if (t->txpriv.link_id == CW1200_LINK_ID_AFTER_DTIM &&
+           !priv->buffered_multicasts) {
+               priv->buffered_multicasts = true;
+               if (priv->sta_asleep_mask)
+                       queue_work(priv->workqueue,
+                                  &priv->multicast_start_work);
+       }
+
+       if (t->txpriv.raw_link_id && t->txpriv.tid < CW1200_MAX_TID)
+               was_buffered = priv->link_id_db[t->txpriv.raw_link_id - 1].buffered[t->txpriv.tid]++;
+
+       return !was_buffered;
+}
+
+/* ******************************************************************** */
+
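+/* mac80211 TX entry point: the frame passes through the cw1200_tx_h_*()
+ * helpers (link id, PM state, TID, crypto padding, DMA alignment, WSM
+ * header, BT coex priority, rate policy) and is then queued for the BH.
+ */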
+void cw1200_tx(struct ieee80211_hw *dev,
+              struct ieee80211_tx_control *control,
+              struct sk_buff *skb)
+{
+       struct cw1200_common *priv = dev->priv;
+       struct cw1200_txinfo t = {
+               .skb = skb,
+               .queue = skb_get_queue_mapping(skb),
+               .tx_info = IEEE80211_SKB_CB(skb),
+               .hdr = (struct ieee80211_hdr *)skb->data,
+               .txpriv.tid = CW1200_MAX_TID,
+               .txpriv.rate_id = CW1200_INVALID_RATE_ID,
+       };
+       struct ieee80211_sta *sta;
+       struct wsm_tx *wsm;
+       bool tid_update = false;
+       u8 flags = 0;
+       int ret;
+
+       if (priv->bh_error)
+               goto drop;
+
+       t.hdrlen = ieee80211_hdrlen(t.hdr->frame_control);
+       t.da = ieee80211_get_DA(t.hdr);
+       if (control) {
+               t.sta = control->sta;
+               t.sta_priv = (struct cw1200_sta_priv *)&t.sta->drv_priv;
+       }
+
+       if (WARN_ON(t.queue >= 4))
+               goto drop;
+
+       ret = cw1200_tx_h_calc_link_ids(priv, &t);
+       if (ret)
+               goto drop;
+
+       pr_debug("[TX] TX %d bytes (queue: %d, link_id: %d (%d)).\n",
+                skb->len, t.queue, t.txpriv.link_id,
+                t.txpriv.raw_link_id);
+
+       cw1200_tx_h_pm(priv, &t);
+       cw1200_tx_h_calc_tid(priv, &t);
+       ret = cw1200_tx_h_crypt(priv, &t);
+       if (ret)
+               goto drop;
+       ret = cw1200_tx_h_align(priv, &t, &flags);
+       if (ret)
+               goto drop;
+       ret = cw1200_tx_h_action(priv, &t);
+       if (ret)
+               goto drop;
+       wsm = cw1200_tx_h_wsm(priv, &t);
+       if (!wsm) {
+               ret = -ENOMEM;
+               goto drop;
+       }
+       wsm->flags |= flags;
+       cw1200_tx_h_bt(priv, &t, wsm);
+       ret = cw1200_tx_h_rate_policy(priv, &t, wsm);
+       if (ret)
+               goto drop;
+
+       rcu_read_lock();
+       sta = rcu_dereference(t.sta);
+
+       spin_lock_bh(&priv->ps_state_lock);
+       {
+               tid_update = cw1200_tx_h_pm_state(priv, &t);
+               BUG_ON(cw1200_queue_put(&priv->tx_queue[t.queue],
+                                       t.skb, &t.txpriv));
+       }
+       spin_unlock_bh(&priv->ps_state_lock);
+
+       if (tid_update && sta)
+               ieee80211_sta_set_buffered(sta, t.txpriv.tid, true);
+
+       rcu_read_unlock();
+
+       cw1200_bh_wakeup(priv);
+
+       return;
+
+drop:
+       cw1200_skb_dtor(priv, skb, &t.txpriv);
+       return;
+}
+
+/* ******************************************************************** */
+
+static int cw1200_handle_action_rx(struct cw1200_common *priv,
+                                  struct sk_buff *skb)
+{
+       struct ieee80211_mgmt *mgmt = (void *)skb->data;
+
+       /* Filter block ACK negotiation: fully controlled by firmware */
+       if (mgmt->u.action.category == WLAN_CATEGORY_BACK)
+               return 1;
+
+       return 0;
+}
+
+static int cw1200_handle_pspoll(struct cw1200_common *priv,
+                               struct sk_buff *skb)
+{
+       struct ieee80211_sta *sta;
+       struct ieee80211_pspoll *pspoll = (struct ieee80211_pspoll *)skb->data;
+       int link_id = 0;
+       u32 pspoll_mask = 0;
+       int drop = 1;
+       int i;
+
+       if (priv->join_status != CW1200_JOIN_STATUS_AP)
+               goto done;
+       if (memcmp(priv->vif->addr, pspoll->bssid, ETH_ALEN))
+               goto done;
+
+       rcu_read_lock();
+       sta = ieee80211_find_sta(priv->vif, pspoll->ta);
+       if (sta) {
+               struct cw1200_sta_priv *sta_priv;
+               sta_priv = (struct cw1200_sta_priv *)&sta->drv_priv;
+               link_id = sta_priv->link_id;
+               pspoll_mask = BIT(sta_priv->link_id);
+       }
+       rcu_read_unlock();
+       if (!link_id)
+               goto done;
+
+       priv->pspoll_mask |= pspoll_mask;
+       drop = 0;
+
+       /* Do not report PS-polls if data for the given link id is already queued. */
+       for (i = 0; i < 4; ++i) {
+               if (cw1200_queue_get_num_queued(&priv->tx_queue[i],
+                                               pspoll_mask)) {
+                       cw1200_bh_wakeup(priv);
+                       drop = 1;
+                       break;
+               }
+       }
+       pr_debug("[RX] PSPOLL: %s\n", drop ? "local" : "fwd");
+done:
+       return drop;
+}
+
+/* ******************************************************************** */
+
+void cw1200_tx_confirm_cb(struct cw1200_common *priv,
+                         int link_id,
+                         struct wsm_tx_confirm *arg)
+{
+       u8 queue_id = cw1200_queue_get_queue_id(arg->packet_id);
+       struct cw1200_queue *queue = &priv->tx_queue[queue_id];
+       struct sk_buff *skb;
+       const struct cw1200_txpriv *txpriv;
+
+       pr_debug("[TX] TX confirm: %d, %d.\n",
+                arg->status, arg->ack_failures);
+
+       if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) {
+               /* STA is stopped. */
+               return;
+       }
+
+       if (WARN_ON(queue_id >= 4))
+               return;
+
+       if (arg->status)
+               pr_debug("TX failed: %d.\n", arg->status);
+
+       if ((arg->status == WSM_REQUEUE) &&
+           (arg->flags & WSM_TX_STATUS_REQUEUE)) {
+               /* "Requeue" means "implicit suspend" */
+               struct wsm_suspend_resume suspend = {
+                       .link_id = link_id,
+                       .stop = 1,
+                       .multicast = !link_id,
+               };
+               cw1200_suspend_resume(priv, &suspend);
+               wiphy_warn(priv->hw->wiphy, "Requeue for link_id %d (try %d). STAs asleep: 0x%.8X\n",
+                          link_id,
+                          cw1200_queue_get_generation(arg->packet_id) + 1,
+                          priv->sta_asleep_mask);
+               cw1200_queue_requeue(queue, arg->packet_id);
+               spin_lock_bh(&priv->ps_state_lock);
+               if (!link_id) {
+                       priv->buffered_multicasts = true;
+                       if (priv->sta_asleep_mask) {
+                               queue_work(priv->workqueue,
+                                          &priv->multicast_start_work);
+                       }
+               }
+               spin_unlock_bh(&priv->ps_state_lock);
+       } else if (!cw1200_queue_get_skb(queue, arg->packet_id,
+                                        &skb, &txpriv)) {
+               struct ieee80211_tx_info *tx = IEEE80211_SKB_CB(skb);
+               int tx_count = arg->ack_failures;
+               u8 ht_flags = 0;
+               int i;
+
+               if (cw1200_ht_greenfield(&priv->ht_info))
+                       ht_flags |= IEEE80211_TX_RC_GREEN_FIELD;
+
+               spin_lock(&priv->bss_loss_lock);
+               if (priv->bss_loss_state &&
+                   arg->packet_id == priv->bss_loss_confirm_id) {
+                       if (arg->status) {
+                               /* Recovery failed */
+                               __cw1200_cqm_bssloss_sm(priv, 0, 0, 1);
+                       } else {
+                               /* Recovery succeeded */
+                               __cw1200_cqm_bssloss_sm(priv, 0, 1, 0);
+                       }
+               }
+               spin_unlock(&priv->bss_loss_lock);
+
+               if (!arg->status) {
+                       tx->flags |= IEEE80211_TX_STAT_ACK;
+                       ++tx_count;
+                       cw1200_debug_txed(priv);
+                       if (arg->flags & WSM_TX_STATUS_AGGREGATION) {
+                               /* Do not report aggregation to mac80211:
+                                * it confuses minstrel a lot.
+                                */
+                               /* tx->flags |= IEEE80211_TX_STAT_AMPDU; */
+                               cw1200_debug_txed_agg(priv);
+                       }
+               } else {
+                       if (tx_count)
+                               ++tx_count;
+               }
+
+               for (i = 0; i < IEEE80211_TX_MAX_RATES; ++i) {
+                       if (tx->status.rates[i].count >= tx_count) {
+                               tx->status.rates[i].count = tx_count;
+                               break;
+                       }
+                       tx_count -= tx->status.rates[i].count;
+                       if (tx->status.rates[i].flags & IEEE80211_TX_RC_MCS)
+                               tx->status.rates[i].flags |= ht_flags;
+               }
+
+               for (++i; i < IEEE80211_TX_MAX_RATES; ++i) {
+                       tx->status.rates[i].count = 0;
+                       tx->status.rates[i].idx = -1;
+               }
+
+               /* Pull off any crypto trailers that we added on */
+               if (tx->control.hw_key) {
+                       skb_trim(skb, skb->len - tx->control.hw_key->icv_len);
+                       if (tx->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
+                               skb_trim(skb, skb->len - 8); /* MIC space */
+               }
+               cw1200_queue_remove(queue, arg->packet_id);
+       }
+       /* XXX TODO:  Only wake if there are pending transmits.. */
+       cw1200_bh_wakeup(priv);
+}
+
+static void cw1200_notify_buffered_tx(struct cw1200_common *priv,
+                              struct sk_buff *skb, int link_id, int tid)
+{
+       struct ieee80211_sta *sta;
+       struct ieee80211_hdr *hdr;
+       u8 *buffered;
+       u8 still_buffered = 0;
+
+       if (link_id && tid < CW1200_MAX_TID) {
+               buffered = priv->link_id_db
+                               [link_id - 1].buffered;
+
+               spin_lock_bh(&priv->ps_state_lock);
+               if (!WARN_ON(!buffered[tid]))
+                       still_buffered = --buffered[tid];
+               spin_unlock_bh(&priv->ps_state_lock);
+
+               if (!still_buffered && tid < CW1200_MAX_TID) {
+                       hdr = (struct ieee80211_hdr *)skb->data;
+                       rcu_read_lock();
+                       sta = ieee80211_find_sta(priv->vif, hdr->addr1);
+                       if (sta)
+                               ieee80211_sta_set_buffered(sta, tid, false);
+                       rcu_read_unlock();
+               }
+       }
+}
+
+void cw1200_skb_dtor(struct cw1200_common *priv,
+                    struct sk_buff *skb,
+                    const struct cw1200_txpriv *txpriv)
+{
+       skb_pull(skb, txpriv->offset);
+       if (txpriv->rate_id != CW1200_INVALID_RATE_ID) {
+               cw1200_notify_buffered_tx(priv, skb,
+                                         txpriv->raw_link_id, txpriv->tid);
+               tx_policy_put(priv, txpriv->rate_id);
+       }
+       ieee80211_tx_status(priv->hw, skb);
+}
+
+void cw1200_rx_cb(struct cw1200_common *priv,
+                 struct wsm_rx *arg,
+                 int link_id,
+                 struct sk_buff **skb_p)
+{
+       struct sk_buff *skb = *skb_p;
+       struct ieee80211_rx_status *hdr = IEEE80211_SKB_RXCB(skb);
+       struct ieee80211_hdr *frame = (struct ieee80211_hdr *)skb->data;
+       struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
+       struct cw1200_link_entry *entry = NULL;
+       unsigned long grace_period;
+
+       bool early_data = false;
+       bool p2p = priv->vif && priv->vif->p2p;
+       size_t hdrlen;
+       hdr->flag = 0;
+
+       if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) {
+               /* STA is stopped. */
+               goto drop;
+       }
+
+       if (link_id && link_id <= CW1200_MAX_STA_IN_AP_MODE) {
+               entry = &priv->link_id_db[link_id - 1];
+               if (entry->status == CW1200_LINK_SOFT &&
+                   ieee80211_is_data(frame->frame_control))
+                       early_data = true;
+               entry->timestamp = jiffies;
+       } else if (p2p &&
+                  ieee80211_is_action(frame->frame_control) &&
+                  (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC)) {
+               pr_debug("[RX] Going to MAP&RESET link ID\n");
+               WARN_ON(work_pending(&priv->linkid_reset_work));
+               memcpy(&priv->action_frame_sa[0],
+                      ieee80211_get_SA(frame), ETH_ALEN);
+               priv->action_linkid = 0;
+               schedule_work(&priv->linkid_reset_work);
+       }
+
+       if (link_id && p2p &&
+           ieee80211_is_action(frame->frame_control) &&
+           (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC)) {
+               /* Link ID already exists for the ACTION frame.
+                * Reset and Remap
+                */
+               WARN_ON(work_pending(&priv->linkid_reset_work));
+               memcpy(&priv->action_frame_sa[0],
+                      ieee80211_get_SA(frame), ETH_ALEN);
+               priv->action_linkid = link_id;
+               schedule_work(&priv->linkid_reset_work);
+       }
+       if (arg->status) {
+               if (arg->status == WSM_STATUS_MICFAILURE) {
+                       pr_debug("[RX] MIC failure.\n");
+                       hdr->flag |= RX_FLAG_MMIC_ERROR;
+               } else if (arg->status == WSM_STATUS_NO_KEY_FOUND) {
+                       pr_debug("[RX] No key found.\n");
+                       goto drop;
+               } else {
+                       pr_debug("[RX] Receive failure: %d.\n",
+                                arg->status);
+                       goto drop;
+               }
+       }
+
+       if (skb->len < sizeof(struct ieee80211_pspoll)) {
+               wiphy_warn(priv->hw->wiphy, "Malformed SDU rx'ed. Size is smaller than IEEE header.\n");
+               goto drop;
+       }
+
+       if (ieee80211_is_pspoll(frame->frame_control))
+               if (cw1200_handle_pspoll(priv, skb))
+                       goto drop;
+
+       hdr->band = ((arg->channel_number & 0xff00) ||
+                    (arg->channel_number > 14)) ?
+                       IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
+       hdr->freq = ieee80211_channel_to_frequency(
+                       arg->channel_number,
+                       hdr->band);
+
+       if (arg->rx_rate >= 14) {
+               hdr->flag |= RX_FLAG_HT;
+               hdr->rate_idx = arg->rx_rate - 14;
+       } else if (arg->rx_rate >= 4) {
+               hdr->rate_idx = arg->rx_rate - 2;
+       } else {
+               hdr->rate_idx = arg->rx_rate;
+       }
+
+       hdr->signal = (s8)arg->rcpi_rssi;
+       hdr->antenna = 0;
+
+       hdrlen = ieee80211_hdrlen(frame->frame_control);
+
+       if (WSM_RX_STATUS_ENCRYPTION(arg->flags)) {
+               size_t iv_len = 0, icv_len = 0;
+
+               hdr->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED;
+
+               /* Oops... There is no fast way to ask mac80211 about
+                * IV/ICV lengths. Even the defines are not exposed.
+                */
+               switch (WSM_RX_STATUS_ENCRYPTION(arg->flags)) {
+               case WSM_RX_STATUS_WEP:
+                       iv_len = 4 /* WEP_IV_LEN */;
+                       icv_len = 4 /* WEP_ICV_LEN */;
+                       break;
+               case WSM_RX_STATUS_TKIP:
+                       iv_len = 8 /* TKIP_IV_LEN */;
+                       icv_len = 4 /* TKIP_ICV_LEN */
+                               + 8 /*MICHAEL_MIC_LEN*/;
+                       hdr->flag |= RX_FLAG_MMIC_STRIPPED;
+                       break;
+               case WSM_RX_STATUS_AES:
+                       iv_len = 8 /* CCMP_HDR_LEN */;
+                       icv_len = 8 /* CCMP_MIC_LEN */;
+                       break;
+               case WSM_RX_STATUS_WAPI:
+                       iv_len = 18 /* WAPI_HDR_LEN */;
+                       icv_len = 16 /* WAPI_MIC_LEN */;
+                       break;
+               default:
+                       pr_warn("Unknown encryption type %d\n",
+                               WSM_RX_STATUS_ENCRYPTION(arg->flags));
+                       goto drop;
+               }
+
+               /* Firmware strips ICV in case of MIC failure. */
+               if (arg->status == WSM_STATUS_MICFAILURE)
+                       icv_len = 0;
+
+               if (skb->len < hdrlen + iv_len + icv_len) {
+                       wiphy_warn(priv->hw->wiphy, "Malformed SDU rx'ed. Size is smaller than crypto headers.\n");
+                       goto drop;
+               }
+
+               /* Remove IV, ICV and MIC */
+               skb_trim(skb, skb->len - icv_len);
+               memmove(skb->data + iv_len, skb->data, hdrlen);
+               skb_pull(skb, iv_len);
+       }
+
+       /* Remove TSF from the end of frame */
+       if (arg->flags & WSM_RX_STATUS_TSF_INCLUDED) {
+               memcpy(&hdr->mactime, skb->data + skb->len - 8, 8);
+               hdr->mactime = le64_to_cpu(hdr->mactime);
+               if (skb->len >= 8)
+                       skb_trim(skb, skb->len - 8);
+       } else {
+               hdr->mactime = 0;
+       }
+
+       cw1200_debug_rxed(priv);
+       if (arg->flags & WSM_RX_STATUS_AGGREGATE)
+               cw1200_debug_rxed_agg(priv);
+
+       if (ieee80211_is_action(frame->frame_control) &&
+           (arg->flags & WSM_RX_STATUS_ADDRESS1)) {
+               if (cw1200_handle_action_rx(priv, skb))
+                       return;
+       } else if (ieee80211_is_beacon(frame->frame_control) &&
+                  !arg->status &&
+                  !memcmp(ieee80211_get_SA(frame), priv->vif->bss_conf.bssid,
+                          ETH_ALEN)) {
+               const u8 *tim_ie;
+               u8 *ies = ((struct ieee80211_mgmt *)
+                         (skb->data))->u.beacon.variable;
+               size_t ies_len = skb->len - (ies - (u8 *)(skb->data));
+
+               tim_ie = cfg80211_find_ie(WLAN_EID_TIM, ies, ies_len);
+               if (tim_ie) {
+                       struct ieee80211_tim_ie *tim =
+                               (struct ieee80211_tim_ie *)&tim_ie[2];
+
+                       if (priv->join_dtim_period != tim->dtim_period) {
+                               priv->join_dtim_period = tim->dtim_period;
+                               queue_work(priv->workqueue,
+                                          &priv->set_beacon_wakeup_period_work);
+                       }
+               }
+
+               /* Disable beacon filter once we're associated... */
+               if (priv->disable_beacon_filter &&
+                   (priv->vif->bss_conf.assoc ||
+                    priv->vif->bss_conf.ibss_joined)) {
+                       priv->disable_beacon_filter = false;
+                       queue_work(priv->workqueue,
+                                  &priv->update_filtering_work);
+               }
+       }
+
+       /* Stay awake after a frame is received to give
+        * userspace a chance to react and acquire the appropriate
+        * wakelock.
+        */
+       if (ieee80211_is_auth(frame->frame_control))
+               grace_period = 5 * HZ;
+       else if (ieee80211_is_deauth(frame->frame_control))
+               grace_period = 5 * HZ;
+       else
+               grace_period = 1 * HZ;
+       cw1200_pm_stay_awake(&priv->pm_state, grace_period);
+
+       if (early_data) {
+               spin_lock_bh(&priv->ps_state_lock);
+               /* Double-check status with lock held */
+               if (entry->status == CW1200_LINK_SOFT)
+                       skb_queue_tail(&entry->rx_queue, skb);
+               else
+                       ieee80211_rx_irqsafe(priv->hw, skb);
+               spin_unlock_bh(&priv->ps_state_lock);
+       } else {
+               ieee80211_rx_irqsafe(priv->hw, skb);
+       }
+       *skb_p = NULL;
+
+       return;
+
+drop:
+       /* TODO: update failure counters */
+       return;
+}
+
+/* ******************************************************************** */
+/* Security                                                            */
+
+int cw1200_alloc_key(struct cw1200_common *priv)
+{
+       int idx;
+
+       idx = ffs(~priv->key_map) - 1;
+       if (idx < 0 || idx > WSM_KEY_MAX_INDEX)
+               return -1;
+
+       priv->key_map |= BIT(idx);
+       priv->keys[idx].index = idx;
+       return idx;
+}
+
+void cw1200_free_key(struct cw1200_common *priv, int idx)
+{
+       BUG_ON(!(priv->key_map & BIT(idx)));
+       memset(&priv->keys[idx], 0, sizeof(priv->keys[idx]));
+       priv->key_map &= ~BIT(idx);
+}
+
+void cw1200_free_keys(struct cw1200_common *priv)
+{
+       memset(&priv->keys, 0, sizeof(priv->keys));
+       priv->key_map = 0;
+}
+
+int cw1200_upload_keys(struct cw1200_common *priv)
+{
+       int idx, ret = 0;
+       for (idx = 0; idx <= WSM_KEY_MAX_INDEX; ++idx)
+               if (priv->key_map & BIT(idx)) {
+                       ret = wsm_add_key(priv, &priv->keys[idx]);
+                       if (ret < 0)
+                               break;
+               }
+       return ret;
+}
+
+/* Workaround for WFD test case 6.1.10 */
+void cw1200_link_id_reset(struct work_struct *work)
+{
+       struct cw1200_common *priv =
+               container_of(work, struct cw1200_common, linkid_reset_work);
+       int temp_linkid;
+
+       if (!priv->action_linkid) {
+               /* In GO mode we can receive ACTION frames without a linkID */
+               temp_linkid = cw1200_alloc_link_id(priv,
+                               &priv->action_frame_sa[0]);
+               WARN_ON(!temp_linkid);
+               if (temp_linkid) {
+                       /* Make sure we execute the WQ */
+                       flush_workqueue(priv->workqueue);
+                       /* Release the link ID */
+                       spin_lock_bh(&priv->ps_state_lock);
+                       priv->link_id_db[temp_linkid - 1].prev_status =
+                               priv->link_id_db[temp_linkid - 1].status;
+                       priv->link_id_db[temp_linkid - 1].status =
+                               CW1200_LINK_RESET;
+                       spin_unlock_bh(&priv->ps_state_lock);
+                       wsm_lock_tx_async(priv);
+                       if (queue_work(priv->workqueue,
+                                      &priv->link_id_work) <= 0)
+                               wsm_unlock_tx(priv);
+               }
+       } else {
+               spin_lock_bh(&priv->ps_state_lock);
+               priv->link_id_db[priv->action_linkid - 1].prev_status =
+                       priv->link_id_db[priv->action_linkid - 1].status;
+               priv->link_id_db[priv->action_linkid - 1].status =
+                       CW1200_LINK_RESET_REMAP;
+               spin_unlock_bh(&priv->ps_state_lock);
+               wsm_lock_tx_async(priv);
+               if (queue_work(priv->workqueue, &priv->link_id_work) <= 0)
+                       wsm_unlock_tx(priv);
+               flush_workqueue(priv->workqueue);
+       }
+}
+
+int cw1200_find_link_id(struct cw1200_common *priv, const u8 *mac)
+{
+       int i, ret = 0;
+       spin_lock_bh(&priv->ps_state_lock);
+       for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) {
+               if (!memcmp(mac, priv->link_id_db[i].mac, ETH_ALEN) &&
+                   priv->link_id_db[i].status) {
+                       priv->link_id_db[i].timestamp = jiffies;
+                       ret = i + 1;
+                       break;
+               }
+       }
+       spin_unlock_bh(&priv->ps_state_lock);
+       return ret;
+}
+
+int cw1200_alloc_link_id(struct cw1200_common *priv, const u8 *mac)
+{
+       int i, ret = 0;
+       unsigned long max_inactivity = 0;
+       unsigned long now = jiffies;
+
+       spin_lock_bh(&priv->ps_state_lock);
+       for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) {
+               if (!priv->link_id_db[i].status) {
+                       ret = i + 1;
+                       break;
+               } else if (priv->link_id_db[i].status != CW1200_LINK_HARD &&
+                          !priv->tx_queue_stats.link_map_cache[i + 1]) {
+                       unsigned long inactivity =
+                               now - priv->link_id_db[i].timestamp;
+                       if (inactivity < max_inactivity)
+                               continue;
+                       max_inactivity = inactivity;
+                       ret = i + 1;
+               }
+       }
+       if (ret) {
+               struct cw1200_link_entry *entry = &priv->link_id_db[ret - 1];
+               pr_debug("[AP] STA added, link_id: %d\n", ret);
+               entry->status = CW1200_LINK_RESERVE;
+               memcpy(&entry->mac, mac, ETH_ALEN);
+               memset(&entry->buffered, 0, CW1200_MAX_TID);
+               skb_queue_head_init(&entry->rx_queue);
+               wsm_lock_tx_async(priv);
+               if (queue_work(priv->workqueue, &priv->link_id_work) <= 0)
+                       wsm_unlock_tx(priv);
+       } else {
+               wiphy_info(priv->hw->wiphy,
+                          "[AP] Early: no more link IDs available.\n");
+       }
+
+       spin_unlock_bh(&priv->ps_state_lock);
+       return ret;
+}
+
+void cw1200_link_id_work(struct work_struct *work)
+{
+       struct cw1200_common *priv =
+               container_of(work, struct cw1200_common, link_id_work);
+       wsm_flush_tx(priv);
+       cw1200_link_id_gc_work(&priv->link_id_gc_work.work);
+       wsm_unlock_tx(priv);
+}
+
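+/* Periodic link-id garbage collector: maps reserved/soft entries into
+ * the device, expires idle soft entries after CW1200_LINK_ID_GC_TIMEOUT,
+ * and services the reset/remap requests queued by cw1200_link_id_reset().
+ */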
+void cw1200_link_id_gc_work(struct work_struct *work)
+{
+       struct cw1200_common *priv =
+               container_of(work, struct cw1200_common, link_id_gc_work.work);
+       struct wsm_reset reset = {
+               .reset_statistics = false,
+       };
+       struct wsm_map_link map_link = {
+               .link_id = 0,
+       };
+       unsigned long now = jiffies;
+       unsigned long next_gc = -1;
+       long ttl;
+       bool need_reset;
+       u32 mask;
+       int i;
+
+       if (priv->join_status != CW1200_JOIN_STATUS_AP)
+               return;
+
+       wsm_lock_tx(priv);
+       spin_lock_bh(&priv->ps_state_lock);
+       for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) {
+               need_reset = false;
+               mask = BIT(i + 1);
+               if (priv->link_id_db[i].status == CW1200_LINK_RESERVE ||
+                   (priv->link_id_db[i].status == CW1200_LINK_HARD &&
+                    !(priv->link_id_map & mask))) {
+                       if (priv->link_id_map & mask) {
+                               priv->sta_asleep_mask &= ~mask;
+                               priv->pspoll_mask &= ~mask;
+                               need_reset = true;
+                       }
+                       priv->link_id_map |= mask;
+                       if (priv->link_id_db[i].status != CW1200_LINK_HARD)
+                               priv->link_id_db[i].status = CW1200_LINK_SOFT;
+                       memcpy(map_link.mac_addr, priv->link_id_db[i].mac,
+                              ETH_ALEN);
+                       spin_unlock_bh(&priv->ps_state_lock);
+                       if (need_reset) {
+                               reset.link_id = i + 1;
+                               wsm_reset(priv, &reset);
+                       }
+                       map_link.link_id = i + 1;
+                       wsm_map_link(priv, &map_link);
+                       next_gc = min(next_gc, CW1200_LINK_ID_GC_TIMEOUT);
+                       spin_lock_bh(&priv->ps_state_lock);
+               } else if (priv->link_id_db[i].status == CW1200_LINK_SOFT) {
+                       ttl = priv->link_id_db[i].timestamp - now +
+                                       CW1200_LINK_ID_GC_TIMEOUT;
+                       if (ttl <= 0) {
+                               need_reset = true;
+                               priv->link_id_db[i].status = CW1200_LINK_OFF;
+                               priv->link_id_map &= ~mask;
+                               priv->sta_asleep_mask &= ~mask;
+                               priv->pspoll_mask &= ~mask;
+                               memset(map_link.mac_addr, 0, ETH_ALEN);
+                               spin_unlock_bh(&priv->ps_state_lock);
+                               reset.link_id = i + 1;
+                               wsm_reset(priv, &reset);
+                               spin_lock_bh(&priv->ps_state_lock);
+                       } else {
+                               next_gc = min_t(unsigned long, next_gc, ttl);
+                       }
+               } else if (priv->link_id_db[i].status == CW1200_LINK_RESET ||
+                               priv->link_id_db[i].status ==
+                               CW1200_LINK_RESET_REMAP) {
+                       int status = priv->link_id_db[i].status;
+                       priv->link_id_db[i].status =
+                                       priv->link_id_db[i].prev_status;
+                       priv->link_id_db[i].timestamp = now;
+                       reset.link_id = i + 1;
+                       spin_unlock_bh(&priv->ps_state_lock);
+                       wsm_reset(priv, &reset);
+                       if (status == CW1200_LINK_RESET_REMAP) {
+                               memcpy(map_link.mac_addr,
+                                      priv->link_id_db[i].mac,
+                                      ETH_ALEN);
+                               map_link.link_id = i + 1;
+                               wsm_map_link(priv, &map_link);
+                               next_gc = min(next_gc,
+                                               CW1200_LINK_ID_GC_TIMEOUT);
+                       }
+                       spin_lock_bh(&priv->ps_state_lock);
+               }
+               if (need_reset) {
+                       skb_queue_purge(&priv->link_id_db[i].rx_queue);
+                       pr_debug("[AP] STA removed, link_id: %d\n",
+                                reset.link_id);
+               }
+       }
+       spin_unlock_bh(&priv->ps_state_lock);
+       if (next_gc != -1)
+               queue_delayed_work(priv->workqueue,
+                                  &priv->link_id_gc_work, next_gc);
+       wsm_unlock_tx(priv);
+}
diff --git a/drivers/net/wireless/cw1200/txrx.h b/drivers/net/wireless/cw1200/txrx.h
new file mode 100644 (file)
index 0000000..492a4e1
--- /dev/null
@@ -0,0 +1,106 @@
+/*
+ * Datapath interface for ST-Ericsson CW1200 mac80211 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef CW1200_TXRX_H
+#define CW1200_TXRX_H
+
+#include <linux/list.h>
+
+/* extern */ struct ieee80211_hw;
+/* extern */ struct sk_buff;
+/* extern */ struct wsm_tx;
+/* extern */ struct wsm_rx;
+/* extern */ struct wsm_tx_confirm;
+/* extern */ struct cw1200_txpriv;
+
+struct tx_policy {
+       union {
+               __le32 tbl[3];
+               u8 raw[12];
+       };
+       u8  defined;
+       u8  usage_count;
+       u8  retry_count;
+       u8  uploaded;
+};
+
+struct tx_policy_cache_entry {
+       struct tx_policy policy;
+       struct list_head link;
+};
+
+#define TX_POLICY_CACHE_SIZE   (8)
+struct tx_policy_cache {
+       struct tx_policy_cache_entry cache[TX_POLICY_CACHE_SIZE];
+       struct list_head used;
+       struct list_head free;
+       spinlock_t lock; /* Protect policy cache */
+};
+
+/* ******************************************************************** */
+/* TX policy cache                                                     */
+/* The TX policy cache exists because of an overcomplicated WSM API:
+ * the device does not accept a per-PDU tx retry sequence. It uses a
+ * "tx retry policy id" instead, so the driver has to keep Linux tx
+ * retry sequences in sync with a retry policy table in the device.
+ */
+void tx_policy_init(struct cw1200_common *priv);
+void tx_policy_upload_work(struct work_struct *work);
+void tx_policy_clean(struct cw1200_common *priv);
+
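The cache declared above holds at most TX_POLICY_CACHE_SIZE refcounted retry policies, so a policy the device already knows can be reused rather than re-uploaded for every frame. A minimal userspace sketch of that lookup-or-allocate idea follows; the entry layout is simplified and the helper names (policy_get, policy_put) are illustrative stand-ins, not the driver's actual txrx.c functions.

    /* Illustrative only: lookup-or-allocate over a small, refcounted cache. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define CACHE_SIZE 8
    #define POLICY_LEN 12                   /* mirrors tx_policy.raw[12] */

    struct policy_entry {
            uint8_t raw[POLICY_LEN];
            int usage_count;                /* 0 means the slot is free */
            int uploaded;                   /* has the device seen it yet? */
    };

    static struct policy_entry cache[CACHE_SIZE];

    /* Return a cache index for 'raw', reusing a matching entry when possible. */
    static int policy_get(const uint8_t *raw)
    {
            int i, free_idx = -1;

            for (i = 0; i < CACHE_SIZE; i++) {
                    if (cache[i].usage_count &&
                        !memcmp(cache[i].raw, raw, POLICY_LEN)) {
                            cache[i].usage_count++;         /* cache hit */
                            return i;
                    }
                    if (!cache[i].usage_count && free_idx < 0)
                            free_idx = i;
            }
            if (free_idx < 0)
                    return -1;                              /* cache full */
            memcpy(cache[free_idx].raw, raw, POLICY_LEN);
            cache[free_idx].usage_count = 1;
            cache[free_idx].uploaded = 0;                   /* needs an upload pass */
            return free_idx;
    }

    static void policy_put(int idx)
    {
            if (idx >= 0 && cache[idx].usage_count)
                    cache[idx].usage_count--;
    }

    int main(void)
    {
            uint8_t a[POLICY_LEN] = { 1, 2, 3 }, b[POLICY_LEN] = { 4, 5, 6 };
            int ia = policy_get(a), ib = policy_get(b), ia2 = policy_get(a);

            printf("a -> %d, b -> %d, a again -> %d (same slot, refcounted)\n",
                   ia, ib, ia2);
            policy_put(ia2);
            policy_put(ia);
            policy_put(ib);
            return 0;
    }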
+/* ******************************************************************** */
+/* TX implementation                                                   */
+
+u32 cw1200_rate_mask_to_wsm(struct cw1200_common *priv,
+                              u32 rates);
+void cw1200_tx(struct ieee80211_hw *dev,
+              struct ieee80211_tx_control *control,
+              struct sk_buff *skb);
+void cw1200_skb_dtor(struct cw1200_common *priv,
+                    struct sk_buff *skb,
+                    const struct cw1200_txpriv *txpriv);
+
+/* ******************************************************************** */
+/* WSM callbacks                                                       */
+
+void cw1200_tx_confirm_cb(struct cw1200_common *priv,
+                         int link_id,
+                         struct wsm_tx_confirm *arg);
+void cw1200_rx_cb(struct cw1200_common *priv,
+                 struct wsm_rx *arg,
+                 int link_id,
+                 struct sk_buff **skb_p);
+
+/* ******************************************************************** */
+/* Timeout                                                             */
+
+void cw1200_tx_timeout(struct work_struct *work);
+
+/* ******************************************************************** */
+/* Security                                                            */
+int cw1200_alloc_key(struct cw1200_common *priv);
+void cw1200_free_key(struct cw1200_common *priv, int idx);
+void cw1200_free_keys(struct cw1200_common *priv);
+int cw1200_upload_keys(struct cw1200_common *priv);
+
+/* ******************************************************************** */
+/* Workaround for WFD test case 6.1.10                                 */
+void cw1200_link_id_reset(struct work_struct *work);
+
+#define CW1200_LINK_ID_GC_TIMEOUT ((unsigned long)(10 * HZ))
+
+int cw1200_find_link_id(struct cw1200_common *priv, const u8 *mac);
+int cw1200_alloc_link_id(struct cw1200_common *priv, const u8 *mac);
+void cw1200_link_id_work(struct work_struct *work);
+void cw1200_link_id_gc_work(struct work_struct *work);
+
+
+#endif /* CW1200_TXRX_H */
diff --git a/drivers/net/wireless/cw1200/wsm.c b/drivers/net/wireless/cw1200/wsm.c
new file mode 100644 (file)
index 0000000..cbb74d7
--- /dev/null
@@ -0,0 +1,1822 @@
+/*
+ * WSM host interface (HI) implementation for
+ * ST-Ericsson CW1200 mac80211 drivers.
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/skbuff.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/random.h>
+
+#include "cw1200.h"
+#include "wsm.h"
+#include "bh.h"
+#include "sta.h"
+#include "debug.h"
+
+#define WSM_CMD_TIMEOUT                (2 * HZ) /* With respect to interrupt loss */
+#define WSM_CMD_START_TIMEOUT  (7 * HZ)
+#define WSM_CMD_RESET_TIMEOUT  (3 * HZ) /* 2 sec. timeout was observed.   */
+#define WSM_CMD_MAX_TIMEOUT    (3 * HZ)
+
+#define WSM_SKIP(buf, size)                                            \
+       do {                                                            \
+               if ((buf)->data + size > (buf)->end)                    \
+                       goto underflow;                                 \
+               (buf)->data += size;                                    \
+       } while (0)
+
+#define WSM_GET(buf, ptr, size)                                                \
+       do {                                                            \
+               if ((buf)->data + size > (buf)->end)                    \
+                       goto underflow;                                 \
+               memcpy(ptr, (buf)->data, size);                         \
+               (buf)->data += size;                                    \
+       } while (0)
+
+#define __WSM_GET(buf, type, type2, cvt)                               \
+       ({                                                              \
+               type val;                                               \
+               if ((buf)->data + sizeof(type) > (buf)->end)            \
+                       goto underflow;                                 \
+               val = cvt(*(type2 *)(buf)->data);                       \
+               (buf)->data += sizeof(type);                            \
+               val;                                                    \
+       })
+
+#define WSM_GET8(buf)  __WSM_GET(buf, u8, u8, (u8))
+#define WSM_GET16(buf) __WSM_GET(buf, u16, __le16, __le16_to_cpu)
+#define WSM_GET32(buf) __WSM_GET(buf, u32, __le32, __le32_to_cpu)
+
+#define WSM_PUT(buf, ptr, size)                                                \
+       do {                                                            \
+               if ((buf)->data + size > (buf)->end)            \
+                       if (wsm_buf_reserve((buf), size))       \
+                               goto nomem;                             \
+               memcpy((buf)->data, ptr, size);                         \
+               (buf)->data += size;                                    \
+       } while (0)
+
+#define __WSM_PUT(buf, val, type, type2, cvt)                          \
+       do {                                                            \
+               if ((buf)->data + sizeof(type) > (buf)->end)            \
+                       if (wsm_buf_reserve((buf), sizeof(type))) \
+                               goto nomem;                             \
+               *(type2 *)(buf)->data = cvt(val);                       \
+               (buf)->data += sizeof(type);                            \
+       } while (0)
+
+#define WSM_PUT8(buf, val)  __WSM_PUT(buf, val, u8, u8, (u8))
+#define WSM_PUT16(buf, val) __WSM_PUT(buf, val, u16, __le16, __cpu_to_le16)
+#define WSM_PUT32(buf, val) __WSM_PUT(buf, val, u32, __le32, __cpu_to_le32)
+
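The WSM_SKIP/WSM_GET*/WSM_PUT* macros above all follow one pattern: a cursor (buf->data) walks a buffer bounded by buf->end, and any attempt to move past the end bails out to an underflow: (readers) or nomem: (writers) label that every calling function must provide. Below is a stripped-down userspace sketch of the read side; the struct cursor and parse_confirm names are invented for illustration and are not part of the driver.

    /* Illustrative only: the goto-on-underflow parse style used by WSM_GET*. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct cursor {
            const uint8_t *data;            /* next byte to read */
            const uint8_t *end;             /* one past the last valid byte */
    };

    #define GET(cur, ptr, size)                                     \
            do {                                                    \
                    if ((cur)->data + (size) > (cur)->end)          \
                            goto underflow;                         \
                    memcpy((ptr), (cur)->data, (size));             \
                    (cur)->data += (size);                          \
            } while (0)

    /* Parse a toy "confirm": a u32 status followed by a 6-byte address. */
    static int parse_confirm(const uint8_t *buf, size_t len)
    {
            struct cursor cur = { .data = buf, .end = buf + len };
            uint32_t status;
            uint8_t addr[6];

            GET(&cur, &status, sizeof(status));
            GET(&cur, addr, sizeof(addr));
            printf("status=%u addr[0]=%u\n", status, addr[0]);
            return 0;

    underflow:                      /* every user of GET() provides this label */
            fprintf(stderr, "message too short\n");
            return -1;
    }

    int main(void)
    {
            uint8_t ok[10] = { 0 };
            uint8_t short_msg[3] = { 0 };

            parse_confirm(ok, sizeof(ok));                  /* succeeds        */
            parse_confirm(short_msg, sizeof(short_msg));    /* hits underflow  */
            return 0;
    }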
+static void wsm_buf_reset(struct wsm_buf *buf);
+static int wsm_buf_reserve(struct wsm_buf *buf, size_t extra_size);
+
+static int wsm_cmd_send(struct cw1200_common *priv,
+                       struct wsm_buf *buf,
+                       void *arg, u16 cmd, long tmo);
+
+#define wsm_cmd_lock(__priv) mutex_lock(&((__priv)->wsm_cmd_mux))
+#define wsm_cmd_unlock(__priv) mutex_unlock(&((__priv)->wsm_cmd_mux))
+
+/* ******************************************************************** */
+/* WSM API implementation                                              */
+
+static int wsm_generic_confirm(struct cw1200_common *priv,
+                            void *arg,
+                            struct wsm_buf *buf)
+{
+       u32 status = WSM_GET32(buf);
+       if (status != WSM_STATUS_SUCCESS)
+               return -EINVAL;
+       return 0;
+
+underflow:
+       WARN_ON(1);
+       return -EINVAL;
+}
+
+int wsm_configuration(struct cw1200_common *priv, struct wsm_configuration *arg)
+{
+       int ret;
+       struct wsm_buf *buf = &priv->wsm_cmd_buf;
+
+       wsm_cmd_lock(priv);
+
+       WSM_PUT32(buf, arg->dot11MaxTransmitMsduLifeTime);
+       WSM_PUT32(buf, arg->dot11MaxReceiveLifeTime);
+       WSM_PUT32(buf, arg->dot11RtsThreshold);
+
+       /* DPD block. */
+       WSM_PUT16(buf, arg->dpdData_size + 12);
+       WSM_PUT16(buf, 1); /* DPD version */
+       WSM_PUT(buf, arg->dot11StationId, ETH_ALEN);
+       WSM_PUT16(buf, 5); /* DPD flags */
+       WSM_PUT(buf, arg->dpdData, arg->dpdData_size);
+
+       ret = wsm_cmd_send(priv, buf, arg,
+                          WSM_CONFIGURATION_REQ_ID, WSM_CMD_TIMEOUT);
+
+       wsm_cmd_unlock(priv);
+       return ret;
+
+nomem:
+       wsm_cmd_unlock(priv);
+       return -ENOMEM;
+}
+
+static int wsm_configuration_confirm(struct cw1200_common *priv,
+                                    struct wsm_configuration *arg,
+                                    struct wsm_buf *buf)
+{
+       int i;
+       int status;
+
+       status = WSM_GET32(buf);
+       if (WARN_ON(status != WSM_STATUS_SUCCESS))
+               return -EINVAL;
+
+       WSM_GET(buf, arg->dot11StationId, ETH_ALEN);
+       arg->dot11FrequencyBandsSupported = WSM_GET8(buf);
+       WSM_SKIP(buf, 1);
+       arg->supportedRateMask = WSM_GET32(buf);
+       for (i = 0; i < 2; ++i) {
+               arg->txPowerRange[i].min_power_level = WSM_GET32(buf);
+               arg->txPowerRange[i].max_power_level = WSM_GET32(buf);
+               arg->txPowerRange[i].stepping = WSM_GET32(buf);
+       }
+       return 0;
+
+underflow:
+       WARN_ON(1);
+       return -EINVAL;
+}
+
+/* ******************************************************************** */
+
+int wsm_reset(struct cw1200_common *priv, const struct wsm_reset *arg)
+{
+       int ret;
+       struct wsm_buf *buf = &priv->wsm_cmd_buf;
+       u16 cmd = WSM_RESET_REQ_ID | WSM_TX_LINK_ID(arg->link_id);
+
+       wsm_cmd_lock(priv);
+
+       WSM_PUT32(buf, arg->reset_statistics ? 0 : 1);
+       ret = wsm_cmd_send(priv, buf, NULL, cmd, WSM_CMD_RESET_TIMEOUT);
+       wsm_cmd_unlock(priv);
+       return ret;
+
+nomem:
+       wsm_cmd_unlock(priv);
+       return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+struct wsm_mib {
+       u16 mib_id;
+       void *buf;
+       size_t buf_size;
+};
+
+int wsm_read_mib(struct cw1200_common *priv, u16 mib_id, void *_buf,
+                       size_t buf_size)
+{
+       int ret;
+       struct wsm_buf *buf = &priv->wsm_cmd_buf;
+       struct wsm_mib mib_buf = {
+               .mib_id = mib_id,
+               .buf = _buf,
+               .buf_size = buf_size,
+       };
+       wsm_cmd_lock(priv);
+
+       WSM_PUT16(buf, mib_id);
+       WSM_PUT16(buf, 0);
+
+       ret = wsm_cmd_send(priv, buf, &mib_buf,
+                          WSM_READ_MIB_REQ_ID, WSM_CMD_TIMEOUT);
+       wsm_cmd_unlock(priv);
+       return ret;
+
+nomem:
+       wsm_cmd_unlock(priv);
+       return -ENOMEM;
+}
+
+static int wsm_read_mib_confirm(struct cw1200_common *priv,
+                               struct wsm_mib *arg,
+                               struct wsm_buf *buf)
+{
+       u16 size;
+       if (WARN_ON(WSM_GET32(buf) != WSM_STATUS_SUCCESS))
+               return -EINVAL;
+
+       if (WARN_ON(WSM_GET16(buf) != arg->mib_id))
+               return -EINVAL;
+
+       size = WSM_GET16(buf);
+       if (size > arg->buf_size)
+               size = arg->buf_size;
+
+       WSM_GET(buf, arg->buf, size);
+       arg->buf_size = size;
+       return 0;
+
+underflow:
+       WARN_ON(1);
+       return -EINVAL;
+}
+
+/* ******************************************************************** */
+
+int wsm_write_mib(struct cw1200_common *priv, u16 mib_id, void *_buf,
+                       size_t buf_size)
+{
+       int ret;
+       struct wsm_buf *buf = &priv->wsm_cmd_buf;
+       struct wsm_mib mib_buf = {
+               .mib_id = mib_id,
+               .buf = _buf,
+               .buf_size = buf_size,
+       };
+
+       wsm_cmd_lock(priv);
+
+       WSM_PUT16(buf, mib_id);
+       WSM_PUT16(buf, buf_size);
+       WSM_PUT(buf, _buf, buf_size);
+
+       ret = wsm_cmd_send(priv, buf, &mib_buf,
+                          WSM_WRITE_MIB_REQ_ID, WSM_CMD_TIMEOUT);
+       wsm_cmd_unlock(priv);
+       return ret;
+
+nomem:
+       wsm_cmd_unlock(priv);
+       return -ENOMEM;
+}
+
+static int wsm_write_mib_confirm(struct cw1200_common *priv,
+                               struct wsm_mib *arg,
+                               struct wsm_buf *buf)
+{
+       int ret;
+
+       ret = wsm_generic_confirm(priv, arg, buf);
+       if (ret)
+               return ret;
+
+       if (arg->mib_id == WSM_MIB_ID_OPERATIONAL_POWER_MODE) {
+               /* OperationalMode: update PM status. */
+               const char *p = arg->buf;
+               cw1200_enable_powersave(priv, (p[0] & 0x0F) ? true : false);
+       }
+       return 0;
+}
+
+/* ******************************************************************** */
+
+int wsm_scan(struct cw1200_common *priv, const struct wsm_scan *arg)
+{
+       int i;
+       int ret;
+       struct wsm_buf *buf = &priv->wsm_cmd_buf;
+
+       if (arg->num_channels > 48)
+               return -EINVAL;
+
+       if (arg->num_ssids > 2)
+               return -EINVAL;
+
+       if (arg->band > 1)
+               return -EINVAL;
+
+       wsm_cmd_lock(priv);
+
+       WSM_PUT8(buf, arg->band);
+       WSM_PUT8(buf, arg->type);
+       WSM_PUT8(buf, arg->flags);
+       WSM_PUT8(buf, arg->max_tx_rate);
+       WSM_PUT32(buf, arg->auto_scan_interval);
+       WSM_PUT8(buf, arg->num_probes);
+       WSM_PUT8(buf, arg->num_channels);
+       WSM_PUT8(buf, arg->num_ssids);
+       WSM_PUT8(buf, arg->probe_delay);
+
+       for (i = 0; i < arg->num_channels; ++i) {
+               WSM_PUT16(buf, arg->ch[i].number);
+               WSM_PUT16(buf, 0);
+               WSM_PUT32(buf, arg->ch[i].min_chan_time);
+               WSM_PUT32(buf, arg->ch[i].max_chan_time);
+               WSM_PUT32(buf, 0);
+       }
+
+       for (i = 0; i < arg->num_ssids; ++i) {
+               WSM_PUT32(buf, arg->ssids[i].length);
+               WSM_PUT(buf, &arg->ssids[i].ssid[0],
+                       sizeof(arg->ssids[i].ssid));
+       }
+
+       ret = wsm_cmd_send(priv, buf, NULL,
+                          WSM_START_SCAN_REQ_ID, WSM_CMD_TIMEOUT);
+       wsm_cmd_unlock(priv);
+       return ret;
+
+nomem:
+       wsm_cmd_unlock(priv);
+       return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+int wsm_stop_scan(struct cw1200_common *priv)
+{
+       int ret;
+       struct wsm_buf *buf = &priv->wsm_cmd_buf;
+       wsm_cmd_lock(priv);
+       ret = wsm_cmd_send(priv, buf, NULL,
+                          WSM_STOP_SCAN_REQ_ID, WSM_CMD_TIMEOUT);
+       wsm_cmd_unlock(priv);
+       return ret;
+}
+
+
+static int wsm_tx_confirm(struct cw1200_common *priv,
+                         struct wsm_buf *buf,
+                         int link_id)
+{
+       struct wsm_tx_confirm tx_confirm;
+
+       tx_confirm.packet_id = WSM_GET32(buf);
+       tx_confirm.status = WSM_GET32(buf);
+       tx_confirm.tx_rate = WSM_GET8(buf);
+       tx_confirm.ack_failures = WSM_GET8(buf);
+       tx_confirm.flags = WSM_GET16(buf);
+       tx_confirm.media_delay = WSM_GET32(buf);
+       tx_confirm.tx_queue_delay = WSM_GET32(buf);
+
+       cw1200_tx_confirm_cb(priv, link_id, &tx_confirm);
+       return 0;
+
+underflow:
+       WARN_ON(1);
+       return -EINVAL;
+}
+
+static int wsm_multi_tx_confirm(struct cw1200_common *priv,
+                               struct wsm_buf *buf, int link_id)
+{
+       int ret;
+       int count;
+       int i;
+
+       count = WSM_GET32(buf);
+       if (WARN_ON(count <= 0))
+               return -EINVAL;
+
+       if (count > 1) {
+               /* We already released one buffer, now for the rest */
+               ret = wsm_release_tx_buffer(priv, count - 1);
+               if (ret < 0)
+                       return ret;
+               else if (ret > 0)
+                       cw1200_bh_wakeup(priv);
+       }
+
+       cw1200_debug_txed_multi(priv, count);
+       for (i = 0; i < count; ++i) {
+               ret = wsm_tx_confirm(priv, buf, link_id);
+               if (ret)
+                       return ret;
+       }
+       return ret;
+
+underflow:
+       WARN_ON(1);
+       return -EINVAL;
+}
+
+/* ******************************************************************** */
+
+static int wsm_join_confirm(struct cw1200_common *priv,
+                           struct wsm_join_cnf *arg,
+                           struct wsm_buf *buf)
+{
+       arg->status = WSM_GET32(buf);
+       if (WARN_ON(arg->status != WSM_STATUS_SUCCESS))
+               return -EINVAL;
+
+       arg->min_power_level = WSM_GET32(buf);
+       arg->max_power_level = WSM_GET32(buf);
+
+       return 0;
+
+underflow:
+       WARN_ON(1);
+       return -EINVAL;
+}
+
+int wsm_join(struct cw1200_common *priv, struct wsm_join *arg)
+{
+       int ret;
+       struct wsm_buf *buf = &priv->wsm_cmd_buf;
+       struct wsm_join_cnf resp;
+       wsm_cmd_lock(priv);
+
+       WSM_PUT8(buf, arg->mode);
+       WSM_PUT8(buf, arg->band);
+       WSM_PUT16(buf, arg->channel_number);
+       WSM_PUT(buf, &arg->bssid[0], sizeof(arg->bssid));
+       WSM_PUT16(buf, arg->atim_window);
+       WSM_PUT8(buf, arg->preamble_type);
+       WSM_PUT8(buf, arg->probe_for_join);
+       WSM_PUT8(buf, arg->dtim_period);
+       WSM_PUT8(buf, arg->flags);
+       WSM_PUT32(buf, arg->ssid_len);
+       WSM_PUT(buf, &arg->ssid[0], sizeof(arg->ssid));
+       WSM_PUT32(buf, arg->beacon_interval);
+       WSM_PUT32(buf, arg->basic_rate_set);
+
+       priv->tx_burst_idx = -1;
+       ret = wsm_cmd_send(priv, buf, &resp,
+                          WSM_JOIN_REQ_ID, WSM_CMD_TIMEOUT);
+       /* TODO:  Update state based on resp.min|max_power_level */
+
+       priv->join_complete_status = resp.status;
+
+       wsm_cmd_unlock(priv);
+       return ret;
+
+nomem:
+       wsm_cmd_unlock(priv);
+       return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+int wsm_set_bss_params(struct cw1200_common *priv,
+                      const struct wsm_set_bss_params *arg)
+{
+       int ret;
+       struct wsm_buf *buf = &priv->wsm_cmd_buf;
+
+       wsm_cmd_lock(priv);
+
+       WSM_PUT8(buf, (arg->reset_beacon_loss ?  0x1 : 0));
+       WSM_PUT8(buf, arg->beacon_lost_count);
+       WSM_PUT16(buf, arg->aid);
+       WSM_PUT32(buf, arg->operational_rate_set);
+
+       ret = wsm_cmd_send(priv, buf, NULL,
+                          WSM_SET_BSS_PARAMS_REQ_ID, WSM_CMD_TIMEOUT);
+
+       wsm_cmd_unlock(priv);
+       return ret;
+
+nomem:
+       wsm_cmd_unlock(priv);
+       return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+int wsm_add_key(struct cw1200_common *priv, const struct wsm_add_key *arg)
+{
+       int ret;
+       struct wsm_buf *buf = &priv->wsm_cmd_buf;
+
+       wsm_cmd_lock(priv);
+
+       WSM_PUT(buf, arg, sizeof(*arg));
+
+       ret = wsm_cmd_send(priv, buf, NULL,
+                          WSM_ADD_KEY_REQ_ID, WSM_CMD_TIMEOUT);
+
+       wsm_cmd_unlock(priv);
+       return ret;
+
+nomem:
+       wsm_cmd_unlock(priv);
+       return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+int wsm_remove_key(struct cw1200_common *priv, const struct wsm_remove_key *arg)
+{
+       int ret;
+       struct wsm_buf *buf = &priv->wsm_cmd_buf;
+
+       wsm_cmd_lock(priv);
+
+       WSM_PUT8(buf, arg->index);
+       WSM_PUT8(buf, 0);
+       WSM_PUT16(buf, 0);
+
+       ret = wsm_cmd_send(priv, buf, NULL,
+                          WSM_REMOVE_KEY_REQ_ID, WSM_CMD_TIMEOUT);
+
+       wsm_cmd_unlock(priv);
+       return ret;
+
+nomem:
+       wsm_cmd_unlock(priv);
+       return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+int wsm_set_tx_queue_params(struct cw1200_common *priv,
+               const struct wsm_set_tx_queue_params *arg, u8 id)
+{
+       int ret;
+       struct wsm_buf *buf = &priv->wsm_cmd_buf;
+       u8 queue_id_to_wmm_aci[] = {3, 2, 0, 1};
+
+       wsm_cmd_lock(priv);
+
+       WSM_PUT8(buf, queue_id_to_wmm_aci[id]);
+       WSM_PUT8(buf, 0);
+       WSM_PUT8(buf, arg->ackPolicy);
+       WSM_PUT8(buf, 0);
+       WSM_PUT32(buf, arg->maxTransmitLifetime);
+       WSM_PUT16(buf, arg->allowedMediumTime);
+       WSM_PUT16(buf, 0);
+
+       ret = wsm_cmd_send(priv, buf, NULL, 0x0012, WSM_CMD_TIMEOUT);
+
+       wsm_cmd_unlock(priv);
+       return ret;
+
+nomem:
+       wsm_cmd_unlock(priv);
+       return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+int wsm_set_edca_params(struct cw1200_common *priv,
+                               const struct wsm_edca_params *arg)
+{
+       int ret;
+       struct wsm_buf *buf = &priv->wsm_cmd_buf;
+
+       wsm_cmd_lock(priv);
+
+       /* Implemented according to specification. */
+
+       WSM_PUT16(buf, arg->params[3].cwmin);
+       WSM_PUT16(buf, arg->params[2].cwmin);
+       WSM_PUT16(buf, arg->params[1].cwmin);
+       WSM_PUT16(buf, arg->params[0].cwmin);
+
+       WSM_PUT16(buf, arg->params[3].cwmax);
+       WSM_PUT16(buf, arg->params[2].cwmax);
+       WSM_PUT16(buf, arg->params[1].cwmax);
+       WSM_PUT16(buf, arg->params[0].cwmax);
+
+       WSM_PUT8(buf, arg->params[3].aifns);
+       WSM_PUT8(buf, arg->params[2].aifns);
+       WSM_PUT8(buf, arg->params[1].aifns);
+       WSM_PUT8(buf, arg->params[0].aifns);
+
+       WSM_PUT16(buf, arg->params[3].txop_limit);
+       WSM_PUT16(buf, arg->params[2].txop_limit);
+       WSM_PUT16(buf, arg->params[1].txop_limit);
+       WSM_PUT16(buf, arg->params[0].txop_limit);
+
+       WSM_PUT32(buf, arg->params[3].max_rx_lifetime);
+       WSM_PUT32(buf, arg->params[2].max_rx_lifetime);
+       WSM_PUT32(buf, arg->params[1].max_rx_lifetime);
+       WSM_PUT32(buf, arg->params[0].max_rx_lifetime);
+
+       ret = wsm_cmd_send(priv, buf, NULL,
+                          WSM_EDCA_PARAMS_REQ_ID, WSM_CMD_TIMEOUT);
+       wsm_cmd_unlock(priv);
+       return ret;
+
+nomem:
+       wsm_cmd_unlock(priv);
+       return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+int wsm_switch_channel(struct cw1200_common *priv,
+                       const struct wsm_switch_channel *arg)
+{
+       int ret;
+       struct wsm_buf *buf = &priv->wsm_cmd_buf;
+
+       wsm_cmd_lock(priv);
+
+       WSM_PUT8(buf, arg->mode);
+       WSM_PUT8(buf, arg->switch_count);
+       WSM_PUT16(buf, arg->channel_number);
+
+       priv->channel_switch_in_progress = 1;
+
+       ret = wsm_cmd_send(priv, buf, NULL,
+                          WSM_SWITCH_CHANNEL_REQ_ID, WSM_CMD_TIMEOUT);
+       if (ret)
+               priv->channel_switch_in_progress = 0;
+
+       wsm_cmd_unlock(priv);
+       return ret;
+
+nomem:
+       wsm_cmd_unlock(priv);
+       return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+int wsm_set_pm(struct cw1200_common *priv, const struct wsm_set_pm *arg)
+{
+       int ret;
+       struct wsm_buf *buf = &priv->wsm_cmd_buf;
+       priv->ps_mode_switch_in_progress = 1;
+
+       wsm_cmd_lock(priv);
+
+       WSM_PUT8(buf, arg->mode);
+       WSM_PUT8(buf, arg->fast_psm_idle_period);
+       WSM_PUT8(buf, arg->ap_psm_change_period);
+       WSM_PUT8(buf, arg->min_auto_pspoll_period);
+
+       ret = wsm_cmd_send(priv, buf, NULL,
+                          WSM_SET_PM_REQ_ID, WSM_CMD_TIMEOUT);
+
+       wsm_cmd_unlock(priv);
+       return ret;
+
+nomem:
+       wsm_cmd_unlock(priv);
+       return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+int wsm_start(struct cw1200_common *priv, const struct wsm_start *arg)
+{
+       int ret;
+       struct wsm_buf *buf = &priv->wsm_cmd_buf;
+
+       wsm_cmd_lock(priv);
+
+       WSM_PUT8(buf, arg->mode);
+       WSM_PUT8(buf, arg->band);
+       WSM_PUT16(buf, arg->channel_number);
+       WSM_PUT32(buf, arg->ct_window);
+       WSM_PUT32(buf, arg->beacon_interval);
+       WSM_PUT8(buf, arg->dtim_period);
+       WSM_PUT8(buf, arg->preamble);
+       WSM_PUT8(buf, arg->probe_delay);
+       WSM_PUT8(buf, arg->ssid_len);
+       WSM_PUT(buf, arg->ssid, sizeof(arg->ssid));
+       WSM_PUT32(buf, arg->basic_rate_set);
+
+       priv->tx_burst_idx = -1;
+       ret = wsm_cmd_send(priv, buf, NULL,
+                          WSM_START_REQ_ID, WSM_CMD_START_TIMEOUT);
+
+       wsm_cmd_unlock(priv);
+       return ret;
+
+nomem:
+       wsm_cmd_unlock(priv);
+       return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+int wsm_beacon_transmit(struct cw1200_common *priv,
+                       const struct wsm_beacon_transmit *arg)
+{
+       int ret;
+       struct wsm_buf *buf = &priv->wsm_cmd_buf;
+
+       wsm_cmd_lock(priv);
+
+       WSM_PUT32(buf, arg->enable_beaconing ? 1 : 0);
+
+       ret = wsm_cmd_send(priv, buf, NULL,
+                          WSM_BEACON_TRANSMIT_REQ_ID, WSM_CMD_TIMEOUT);
+
+       wsm_cmd_unlock(priv);
+       return ret;
+
+nomem:
+       wsm_cmd_unlock(priv);
+       return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+int wsm_start_find(struct cw1200_common *priv)
+{
+       int ret;
+       struct wsm_buf *buf = &priv->wsm_cmd_buf;
+
+       wsm_cmd_lock(priv);
+       ret = wsm_cmd_send(priv, buf, NULL, 0x0019, WSM_CMD_TIMEOUT);
+       wsm_cmd_unlock(priv);
+       return ret;
+}
+
+/* ******************************************************************** */
+
+int wsm_stop_find(struct cw1200_common *priv)
+{
+       int ret;
+       struct wsm_buf *buf = &priv->wsm_cmd_buf;
+
+       wsm_cmd_lock(priv);
+       ret = wsm_cmd_send(priv, buf, NULL, 0x001A, WSM_CMD_TIMEOUT);
+       wsm_cmd_unlock(priv);
+       return ret;
+}
+
+/* ******************************************************************** */
+
+int wsm_map_link(struct cw1200_common *priv, const struct wsm_map_link *arg)
+{
+       int ret;
+       struct wsm_buf *buf = &priv->wsm_cmd_buf;
+       u16 cmd = 0x001C | WSM_TX_LINK_ID(arg->link_id);
+
+       wsm_cmd_lock(priv);
+
+       WSM_PUT(buf, &arg->mac_addr[0], sizeof(arg->mac_addr));
+       WSM_PUT16(buf, 0);
+
+       ret = wsm_cmd_send(priv, buf, NULL, cmd, WSM_CMD_TIMEOUT);
+
+       wsm_cmd_unlock(priv);
+       return ret;
+
+nomem:
+       wsm_cmd_unlock(priv);
+       return -ENOMEM;
+}
+
+/* ******************************************************************** */
+
+int wsm_update_ie(struct cw1200_common *priv,
+                 const struct wsm_update_ie *arg)
+{
+       int ret;
+       struct wsm_buf *buf = &priv->wsm_cmd_buf;
+
+       wsm_cmd_lock(priv);
+
+       WSM_PUT16(buf, arg->what);
+       WSM_PUT16(buf, arg->count);
+       WSM_PUT(buf, arg->ies, arg->length);
+
+       ret = wsm_cmd_send(priv, buf, NULL, 0x001B, WSM_CMD_TIMEOUT);
+
+       wsm_cmd_unlock(priv);
+       return ret;
+
+nomem:
+       wsm_cmd_unlock(priv);
+       return -ENOMEM;
+}
+
+/* ******************************************************************** */
+int wsm_set_probe_responder(struct cw1200_common *priv, bool enable)
+{
+       priv->rx_filter.probeResponder = enable;
+       return wsm_set_rx_filter(priv, &priv->rx_filter);
+}
+
+/* ******************************************************************** */
+/* WSM indication events implementation                                        */
+const char * const cw1200_fw_types[] = {
+       "ETF",
+       "WFM",
+       "WSM",
+       "HI test",
+       "Platform test"
+};
+
+static int wsm_startup_indication(struct cw1200_common *priv,
+                                       struct wsm_buf *buf)
+{
+       priv->wsm_caps.input_buffers     = WSM_GET16(buf);
+       priv->wsm_caps.input_buffer_size = WSM_GET16(buf);
+       priv->wsm_caps.hw_id      = WSM_GET16(buf);
+       priv->wsm_caps.hw_subid   = WSM_GET16(buf);
+       priv->wsm_caps.status     = WSM_GET16(buf);
+       priv->wsm_caps.fw_cap     = WSM_GET16(buf);
+       priv->wsm_caps.fw_type    = WSM_GET16(buf);
+       priv->wsm_caps.fw_api     = WSM_GET16(buf);
+       priv->wsm_caps.fw_build   = WSM_GET16(buf);
+       priv->wsm_caps.fw_ver     = WSM_GET16(buf);
+       WSM_GET(buf, priv->wsm_caps.fw_label, sizeof(priv->wsm_caps.fw_label));
+       priv->wsm_caps.fw_label[sizeof(priv->wsm_caps.fw_label) - 1] = 0; /* Do not trust FW too much... */
+
+       if (WARN_ON(priv->wsm_caps.status))
+               return -EINVAL;
+
+       if (WARN_ON(priv->wsm_caps.fw_type > 4))
+               return -EINVAL;
+
+       pr_info("CW1200 WSM init done.\n"
+               "   Input buffers: %d x %d bytes\n"
+               "   Hardware: %d.%d\n"
+               "   %s firmware [%s], ver: %d, build: %d,"
+               "   api: %d, cap: 0x%.4X\n",
+               priv->wsm_caps.input_buffers,
+               priv->wsm_caps.input_buffer_size,
+               priv->wsm_caps.hw_id, priv->wsm_caps.hw_subid,
+               cw1200_fw_types[priv->wsm_caps.fw_type],
+               priv->wsm_caps.fw_label, priv->wsm_caps.fw_ver,
+               priv->wsm_caps.fw_build,
+               priv->wsm_caps.fw_api, priv->wsm_caps.fw_cap);
+
+       /* Disable unsupported frequency bands */
+       if (!(priv->wsm_caps.fw_cap & 0x1))
+               priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = NULL;
+       if (!(priv->wsm_caps.fw_cap & 0x2))
+               priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
+
+       priv->firmware_ready = 1;
+       wake_up(&priv->wsm_startup_done);
+       return 0;
+
+underflow:
+       WARN_ON(1);
+       return -EINVAL;
+}
+
+static int wsm_receive_indication(struct cw1200_common *priv,
+                                 int link_id,
+                                 struct wsm_buf *buf,
+                                 struct sk_buff **skb_p)
+{
+       struct wsm_rx rx;
+       struct ieee80211_hdr *hdr;
+       size_t hdr_len;
+       __le16 fctl;
+
+       rx.status = WSM_GET32(buf);
+       rx.channel_number = WSM_GET16(buf);
+       rx.rx_rate = WSM_GET8(buf);
+       rx.rcpi_rssi = WSM_GET8(buf);
+       rx.flags = WSM_GET32(buf);
+
+       /* FW workaround: drop probe resp or beacon when RSSI is 0 */
+       hdr = (struct ieee80211_hdr *)(*skb_p)->data;
+
+       if (!rx.rcpi_rssi &&
+           (ieee80211_is_probe_resp(hdr->frame_control) ||
+            ieee80211_is_beacon(hdr->frame_control)))
+               return 0;
+
+       /* If no RSSI subscription has been made,
+        * convert RCPI to RSSI here
+        */
+       if (!priv->cqm_use_rssi)
+               rx.rcpi_rssi = rx.rcpi_rssi / 2 - 110;
+
+       fctl = *(__le16 *)buf->data;
+       hdr_len = buf->data - buf->begin;
+       skb_pull(*skb_p, hdr_len);
+       if (!rx.status && ieee80211_is_deauth(fctl)) {
+               if (priv->join_status == CW1200_JOIN_STATUS_STA) {
+                       /* Schedule unjoin work */
+                       pr_debug("[WSM] Issue unjoin command (RX).\n");
+                       wsm_lock_tx_async(priv);
+                       if (queue_work(priv->workqueue,
+                                      &priv->unjoin_work) <= 0)
+                               wsm_unlock_tx(priv);
+               }
+       }
+       cw1200_rx_cb(priv, &rx, link_id, skb_p);
+       if (*skb_p)
+               skb_push(*skb_p, hdr_len);
+
+       return 0;
+
+underflow:
+       return -EINVAL;
+}
+
+static int wsm_event_indication(struct cw1200_common *priv, struct wsm_buf *buf)
+{
+       int first;
+       struct cw1200_wsm_event *event;
+
+       if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) {
+               /* STA is stopped. */
+               return 0;
+       }
+
+       event = kzalloc(sizeof(struct cw1200_wsm_event), GFP_KERNEL);
+       if (!event)
+               return -ENOMEM;
+
+       event->evt.id = WSM_GET32(buf);
+       event->evt.data = WSM_GET32(buf);
+
+       pr_debug("[WSM] Event: %d(%d)\n",
+                event->evt.id, event->evt.data);
+
+       spin_lock(&priv->event_queue_lock);
+       first = list_empty(&priv->event_queue);
+       list_add_tail(&event->link, &priv->event_queue);
+       spin_unlock(&priv->event_queue_lock);
+
+       if (first)
+               queue_work(priv->workqueue, &priv->event_handler);
+
+       return 0;
+
+underflow:
+       kfree(event);
+       return -EINVAL;
+}
+
+static int wsm_channel_switch_indication(struct cw1200_common *priv,
+                                        struct wsm_buf *buf)
+{
+       WARN_ON(WSM_GET32(buf));
+
+       priv->channel_switch_in_progress = 0;
+       wake_up(&priv->channel_switch_done);
+
+       wsm_unlock_tx(priv);
+
+       return 0;
+
+underflow:
+       return -EINVAL;
+}
+
+static int wsm_set_pm_indication(struct cw1200_common *priv,
+                                struct wsm_buf *buf)
+{
+       /* TODO:  Check buf (struct wsm_set_pm_complete) for validity */
+       if (priv->ps_mode_switch_in_progress) {
+               priv->ps_mode_switch_in_progress = 0;
+               wake_up(&priv->ps_mode_switch_done);
+       }
+       return 0;
+}
+
+static int wsm_scan_started(struct cw1200_common *priv, void *arg,
+                           struct wsm_buf *buf)
+{
+       u32 status = WSM_GET32(buf);
+       if (status != WSM_STATUS_SUCCESS) {
+               cw1200_scan_failed_cb(priv);
+               return -EINVAL;
+       }
+       return 0;
+
+underflow:
+       WARN_ON(1);
+       return -EINVAL;
+}
+
+static int wsm_scan_complete_indication(struct cw1200_common *priv,
+                                       struct wsm_buf *buf)
+{
+       struct wsm_scan_complete arg;
+       arg.status = WSM_GET32(buf);
+       arg.psm = WSM_GET8(buf);
+       arg.num_channels = WSM_GET8(buf);
+       cw1200_scan_complete_cb(priv, &arg);
+
+       return 0;
+
+underflow:
+       return -EINVAL;
+}
+
+static int wsm_join_complete_indication(struct cw1200_common *priv,
+                                       struct wsm_buf *buf)
+{
+       struct wsm_join_complete arg;
+       arg.status = WSM_GET32(buf);
+       pr_debug("[WSM] Join complete indication, status: %d\n", arg.status);
+       cw1200_join_complete_cb(priv, &arg);
+
+       return 0;
+
+underflow:
+       return -EINVAL;
+}
+
+static int wsm_find_complete_indication(struct cw1200_common *priv,
+                                       struct wsm_buf *buf)
+{
+       pr_warn("Implement find_complete_indication\n");
+       return 0;
+}
+
+static int wsm_ba_timeout_indication(struct cw1200_common *priv,
+                                    struct wsm_buf *buf)
+{
+       u32 dummy;
+       u8 tid;
+       u8 dummy2;
+       u8 addr[ETH_ALEN];
+
+       dummy = WSM_GET32(buf);
+       tid = WSM_GET8(buf);
+       dummy2 = WSM_GET8(buf);
+       WSM_GET(buf, addr, ETH_ALEN);
+
+       pr_info("BlockACK timeout, tid %d, addr %pM\n",
+               tid, addr);
+
+       return 0;
+
+underflow:
+       return -EINVAL;
+}
+
+static int wsm_suspend_resume_indication(struct cw1200_common *priv,
+                                        int link_id, struct wsm_buf *buf)
+{
+       u32 flags;
+       struct wsm_suspend_resume arg;
+
+       flags = WSM_GET32(buf);
+       arg.link_id = link_id;
+       arg.stop = !(flags & 1);
+       arg.multicast = !!(flags & 8);
+       arg.queue = (flags >> 1) & 3;
+
+       cw1200_suspend_resume(priv, &arg);
+
+       return 0;
+
+underflow:
+       return -EINVAL;
+}
+
+
+/* ******************************************************************** */
+/* WSM TX                                                              */
+
+static int wsm_cmd_send(struct cw1200_common *priv,
+                       struct wsm_buf *buf,
+                       void *arg, u16 cmd, long tmo)
+{
+       size_t buf_len = buf->data - buf->begin;
+       int ret;
+
+       /* Don't bother if we're dead. */
+       if (priv->bh_error) {
+               ret = 0;
+               goto done;
+       }
+
+       /* Block until the cmd buffer is completed.  Tortuous. */
+       spin_lock(&priv->wsm_cmd.lock);
+       while (!priv->wsm_cmd.done) {
+               spin_unlock(&priv->wsm_cmd.lock);
+               spin_lock(&priv->wsm_cmd.lock);
+       }
+       priv->wsm_cmd.done = 0;
+       spin_unlock(&priv->wsm_cmd.lock);
+
+       if (cmd == WSM_WRITE_MIB_REQ_ID ||
+           cmd == WSM_READ_MIB_REQ_ID)
+               pr_debug("[WSM] >>> 0x%.4X [MIB: 0x%.4X] (%zu)\n",
+                        cmd, __le16_to_cpu(((__le16 *)buf->begin)[2]),
+                        buf_len);
+       else
+               pr_debug("[WSM] >>> 0x%.4X (%zu)\n", cmd, buf_len);
+
+       /* Due to buggy SPI on CW1200, we need to
+        * pad the message by a few bytes to ensure
+        * that it's completely received.
+        */
+       buf_len += 4;
+
+       /* Fill HI message header */
+       /* BH will add sequence number */
+       ((__le16 *)buf->begin)[0] = __cpu_to_le16(buf_len);
+       ((__le16 *)buf->begin)[1] = __cpu_to_le16(cmd);
+
+       spin_lock(&priv->wsm_cmd.lock);
+       BUG_ON(priv->wsm_cmd.ptr);
+       priv->wsm_cmd.ptr = buf->begin;
+       priv->wsm_cmd.len = buf_len;
+       priv->wsm_cmd.arg = arg;
+       priv->wsm_cmd.cmd = cmd;
+       spin_unlock(&priv->wsm_cmd.lock);
+
+       cw1200_bh_wakeup(priv);
+
+       /* Wait for command completion */
+       ret = wait_event_timeout(priv->wsm_cmd_wq,
+                                priv->wsm_cmd.done, tmo);
+
+       if (!ret && !priv->wsm_cmd.done) {
+               spin_lock(&priv->wsm_cmd.lock);
+               priv->wsm_cmd.done = 1;
+               priv->wsm_cmd.ptr = NULL;
+               spin_unlock(&priv->wsm_cmd.lock);
+               if (priv->bh_error) {
+                       /* Return ok to help system cleanup */
+                       ret = 0;
+               } else {
+                       pr_err("CMD req (0x%04x) stuck in firmware, killing BH\n", priv->wsm_cmd.cmd);
+                       print_hex_dump_bytes("REQDUMP: ", DUMP_PREFIX_NONE,
+                                            buf->begin, buf_len);
+                       pr_err("Outstanding outgoing frames:  %d\n", priv->hw_bufs_used);
+
+                       /* Kill BH thread to report the error to the top layer. */
+                       atomic_add(1, &priv->bh_term);
+                       wake_up(&priv->bh_wq);
+                       ret = -ETIMEDOUT;
+               }
+       } else {
+               spin_lock(&priv->wsm_cmd.lock);
+               BUG_ON(!priv->wsm_cmd.done);
+               ret = priv->wsm_cmd.ret;
+               spin_unlock(&priv->wsm_cmd.lock);
+       }
+done:
+       wsm_buf_reset(buf);
+       return ret;
+}
+
+/* ******************************************************************** */
+/* WSM TX port control                                                 */
+
+void wsm_lock_tx(struct cw1200_common *priv)
+{
+       wsm_cmd_lock(priv);
+       if (atomic_add_return(1, &priv->tx_lock) == 1) {
+               if (wsm_flush_tx(priv))
+                       pr_debug("[WSM] TX is locked.\n");
+       }
+       wsm_cmd_unlock(priv);
+}
+
+void wsm_lock_tx_async(struct cw1200_common *priv)
+{
+       if (atomic_add_return(1, &priv->tx_lock) == 1)
+               pr_debug("[WSM] TX is locked (async).\n");
+}
+
+bool wsm_flush_tx(struct cw1200_common *priv)
+{
+       unsigned long timestamp = jiffies;
+       bool pending = false;
+       long timeout;
+       int i;
+
+       /* Flush must be called with TX lock held. */
+       BUG_ON(!atomic_read(&priv->tx_lock));
+
+       /* First check if we really need to do something.
+        * It is safe to use unprotected access, as hw_bufs_used
+        * can only decrement.
+        */
+       if (!priv->hw_bufs_used)
+               return true;
+
+       if (priv->bh_error) {
+               /* In case of failure do not wait for magic. */
+               pr_err("[WSM] Fatal error occurred, will not flush TX.\n");
+               return false;
+       } else {
+               /* Get a timestamp of "oldest" frame */
+               for (i = 0; i < 4; ++i)
+                       pending |= cw1200_queue_get_xmit_timestamp(
+                                       &priv->tx_queue[i],
+                                       &timestamp, 0xffffffff);
+               /* If there's nothing pending, we're good */
+               if (!pending)
+                       return true;
+
+               timeout = timestamp + WSM_CMD_LAST_CHANCE_TIMEOUT - jiffies;
+               if (timeout < 0 || wait_event_timeout(priv->bh_evt_wq,
+                                                     !priv->hw_bufs_used,
+                                                     timeout) <= 0) {
+                       /* Hmmm... Not good. Frames are stuck in firmware. */
+                       priv->bh_error = 1;
+                       wiphy_err(priv->hw->wiphy, "[WSM] TX Frames (%d) stuck in firmware, killing BH\n", priv->hw_bufs_used);
+                       wake_up(&priv->bh_wq);
+                       return false;
+               }
+
+               /* Ok, everything is flushed. */
+               return true;
+       }
+}
+
+void wsm_unlock_tx(struct cw1200_common *priv)
+{
+       int tx_lock;
+       tx_lock = atomic_sub_return(1, &priv->tx_lock);
+       BUG_ON(tx_lock < 0);
+
+       if (tx_lock == 0) {
+               if (!priv->bh_error)
+                       cw1200_bh_wakeup(priv);
+               pr_debug("[WSM] TX is unlocked.\n");
+       }
+}
+
+/* ******************************************************************** */
+/* WSM RX                                                              */
+
+int wsm_handle_exception(struct cw1200_common *priv, u8 *data, size_t len)
+{
+       struct wsm_buf buf;
+       u32 reason;
+       u32 reg[18];
+       char fname[48];
+       unsigned int i;
+
+       static const char * const reason_str[] = {
+               "undefined instruction",
+               "prefetch abort",
+               "data abort",
+               "unknown error",
+       };
+
+       buf.begin = buf.data = data;
+       buf.end = &buf.begin[len];
+
+       reason = WSM_GET32(&buf);
+       for (i = 0; i < ARRAY_SIZE(reg); ++i)
+               reg[i] = WSM_GET32(&buf);
+       WSM_GET(&buf, fname, sizeof(fname));
+
+       if (reason < 4)
+               wiphy_err(priv->hw->wiphy,
+                         "Firmware exception: %s.\n",
+                         reason_str[reason]);
+       else
+               wiphy_err(priv->hw->wiphy,
+                         "Firmware assert at %.*s, line %d\n",
+                         (int) sizeof(fname), fname, reg[1]);
+
+       for (i = 0; i < 12; i += 4)
+               wiphy_err(priv->hw->wiphy,
+                         "R%d: 0x%.8X, R%d: 0x%.8X, R%d: 0x%.8X, R%d: 0x%.8X,\n",
+                         i + 0, reg[i + 0], i + 1, reg[i + 1],
+                         i + 2, reg[i + 2], i + 3, reg[i + 3]);
+       wiphy_err(priv->hw->wiphy,
+                 "R12: 0x%.8X, SP: 0x%.8X, LR: 0x%.8X, PC: 0x%.8X,\n",
+                 reg[i + 0], reg[i + 1], reg[i + 2], reg[i + 3]);
+       i += 4;
+       wiphy_err(priv->hw->wiphy,
+                 "CPSR: 0x%.8X, SPSR: 0x%.8X\n",
+                 reg[i + 0], reg[i + 1]);
+
+       print_hex_dump_bytes("R1: ", DUMP_PREFIX_NONE,
+                            fname, sizeof(fname));
+       return 0;
+
+underflow:
+       wiphy_err(priv->hw->wiphy, "Firmware exception.\n");
+       print_hex_dump_bytes("Exception: ", DUMP_PREFIX_NONE,
+                            data, len);
+       return -EINVAL;
+}
+
+int wsm_handle_rx(struct cw1200_common *priv, u16 id,
+                 struct wsm_hdr *wsm, struct sk_buff **skb_p)
+{
+       int ret = 0;
+       struct wsm_buf wsm_buf;
+       int link_id = (id >> 6) & 0x0F;
+
+       /* Strip link id. */
+       id &= ~WSM_TX_LINK_ID(WSM_TX_LINK_ID_MAX);
+
+       wsm_buf.begin = (u8 *)&wsm[0];
+       wsm_buf.data = (u8 *)&wsm[1];
+       wsm_buf.end = &wsm_buf.begin[__le16_to_cpu(wsm->len)];
+
+       pr_debug("[WSM] <<< 0x%.4X (%td)\n", id,
+                wsm_buf.end - wsm_buf.begin);
+
+       if (id == WSM_TX_CONFIRM_IND_ID) {
+               ret = wsm_tx_confirm(priv, &wsm_buf, link_id);
+       } else if (id == WSM_MULTI_TX_CONFIRM_ID) {
+               ret = wsm_multi_tx_confirm(priv, &wsm_buf, link_id);
+       } else if (id & 0x0400) {
+               void *wsm_arg;
+               u16 wsm_cmd;
+
+               /* Do not trust the FW too much: protect against a repeated
+                * response and close a race window (see above).
+                */
+               spin_lock(&priv->wsm_cmd.lock);
+               wsm_arg = priv->wsm_cmd.arg;
+               wsm_cmd = priv->wsm_cmd.cmd &
+                               ~WSM_TX_LINK_ID(WSM_TX_LINK_ID_MAX);
+               priv->wsm_cmd.cmd = 0xFFFF;
+               spin_unlock(&priv->wsm_cmd.lock);
+
+               if (WARN_ON((id & ~0x0400) != wsm_cmd)) {
+                       /* Note that any non-zero is a fatal retcode. */
+                       ret = -EINVAL;
+                       goto out;
+               }
+
+               /* Note that wsm_arg can be NULL in case of timeout in
+                * wsm_cmd_send().
+                */
+
+               switch (id) {
+               case WSM_READ_MIB_RESP_ID:
+                       if (wsm_arg)
+                               ret = wsm_read_mib_confirm(priv, wsm_arg,
+                                                               &wsm_buf);
+                       break;
+               case WSM_WRITE_MIB_RESP_ID:
+                       if (wsm_arg)
+                               ret = wsm_write_mib_confirm(priv, wsm_arg,
+                                                           &wsm_buf);
+                       break;
+               case WSM_START_SCAN_RESP_ID:
+                       if (wsm_arg)
+                               ret = wsm_scan_started(priv, wsm_arg, &wsm_buf);
+                       break;
+               case WSM_CONFIGURATION_RESP_ID:
+                       if (wsm_arg)
+                               ret = wsm_configuration_confirm(priv, wsm_arg,
+                                                               &wsm_buf);
+                       break;
+               case WSM_JOIN_RESP_ID:
+                       if (wsm_arg)
+                               ret = wsm_join_confirm(priv, wsm_arg, &wsm_buf);
+                       break;
+               case WSM_STOP_SCAN_RESP_ID:
+               case WSM_RESET_RESP_ID:
+               case WSM_ADD_KEY_RESP_ID:
+               case WSM_REMOVE_KEY_RESP_ID:
+               case WSM_SET_PM_RESP_ID:
+               case WSM_SET_BSS_PARAMS_RESP_ID:
+               case 0x0412: /* set_tx_queue_params */
+               case WSM_EDCA_PARAMS_RESP_ID:
+               case WSM_SWITCH_CHANNEL_RESP_ID:
+               case WSM_START_RESP_ID:
+               case WSM_BEACON_TRANSMIT_RESP_ID:
+               case 0x0419: /* start_find */
+               case 0x041A: /* stop_find */
+               case 0x041B: /* update_ie */
+               case 0x041C: /* map_link */
+                       WARN_ON(wsm_arg != NULL);
+                       ret = wsm_generic_confirm(priv, wsm_arg, &wsm_buf);
+                       if (ret) {
+                               wiphy_warn(priv->hw->wiphy,
+                                          "wsm_generic_confirm failed for request 0x%04x.\n",
+                                          id & ~0x0400);
+
+                               /* often 0x407 and 0x410 occur, this means we're dead.. */
+                               if (priv->join_status >= CW1200_JOIN_STATUS_JOINING) {
+                                       wsm_lock_tx(priv);
+                                       if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0)
+                                               wsm_unlock_tx(priv);
+                               }
+                       }
+                       break;
+               default:
+                       wiphy_warn(priv->hw->wiphy,
+                                  "Unrecognized confirmation 0x%04x\n",
+                                  id & ~0x0400);
+               }
+
+               spin_lock(&priv->wsm_cmd.lock);
+               priv->wsm_cmd.ret = ret;
+               priv->wsm_cmd.done = 1;
+               spin_unlock(&priv->wsm_cmd.lock);
+
+               ret = 0; /* An error response from the device should not stop the BH. */
+
+               wake_up(&priv->wsm_cmd_wq);
+       } else if (id & 0x0800) {
+               switch (id) {
+               case WSM_STARTUP_IND_ID:
+                       ret = wsm_startup_indication(priv, &wsm_buf);
+                       break;
+               case WSM_RECEIVE_IND_ID:
+                       ret = wsm_receive_indication(priv, link_id,
+                                                    &wsm_buf, skb_p);
+                       break;
+               case 0x0805:
+                       ret = wsm_event_indication(priv, &wsm_buf);
+                       break;
+               case WSM_SCAN_COMPLETE_IND_ID:
+                       ret = wsm_scan_complete_indication(priv, &wsm_buf);
+                       break;
+               case 0x0808:
+                       ret = wsm_ba_timeout_indication(priv, &wsm_buf);
+                       break;
+               case 0x0809:
+                       ret = wsm_set_pm_indication(priv, &wsm_buf);
+                       break;
+               case 0x080A:
+                       ret = wsm_channel_switch_indication(priv, &wsm_buf);
+                       break;
+               case 0x080B:
+                       ret = wsm_find_complete_indication(priv, &wsm_buf);
+                       break;
+               case 0x080C:
+                       ret = wsm_suspend_resume_indication(priv,
+                                       link_id, &wsm_buf);
+                       break;
+               case 0x080F:
+                       ret = wsm_join_complete_indication(priv, &wsm_buf);
+                       break;
+               default:
+                       pr_warn("Unrecognised WSM ID %04x\n", id);
+               }
+       } else {
+               WARN_ON(1);
+               ret = -EINVAL;
+       }
+out:
+       return ret;
+}
+
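wsm_handle_rx() above dispatches on bit fields packed into the 16-bit message id: bits 6-9 carry the link id, ids with 0x0400 set are confirmations whose low bits match the originating request (start_find 0x0019 is confirmed by 0x0419, for instance), and ids with 0x0800 set are unsolicited indications. The standalone sketch below decodes an id the same way; it is derived only from the masks visible in the function above, and the macro names are invented for illustration.

    /* Illustrative only: decoding the id fields the way wsm_handle_rx() does. */
    #include <stdint.h>
    #include <stdio.h>

    #define ID_LINK_SHIFT     6
    #define ID_LINK_MASK      (0x0F << ID_LINK_SHIFT)   /* bits 6..9            */
    #define ID_CONFIRM_BIT    0x0400                    /* response to a request */
    #define ID_INDICATION_BIT 0x0800                    /* unsolicited event     */

    static void decode(uint16_t id)
    {
            unsigned link_id = (id >> ID_LINK_SHIFT) & 0x0F;
            uint16_t base = id & ~ID_LINK_MASK;         /* strip the link id */

            if (base & ID_CONFIRM_BIT)
                    printf("0x%04x: confirmation of request 0x%04x, link %u\n",
                           id, base & ~ID_CONFIRM_BIT, link_id);
            else if (base & ID_INDICATION_BIT)
                    printf("0x%04x: indication 0x%04x, link %u\n",
                           id, (unsigned)base, link_id);
            else
                    printf("0x%04x: other, link %u\n", id, link_id);
    }

    int main(void)
    {
            decode(0x0419);                             /* confirms start_find (0x0019) */
            decode(0x0805);                             /* event indication             */
            decode(0x0805 | (2 << ID_LINK_SHIFT));      /* same indication, link id 2   */
            return 0;
    }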
+static bool wsm_handle_tx_data(struct cw1200_common *priv,
+                              struct wsm_tx *wsm,
+                              const struct ieee80211_tx_info *tx_info,
+                              const struct cw1200_txpriv *txpriv,
+                              struct cw1200_queue *queue)
+{
+       bool handled = false;
+       const struct ieee80211_hdr *frame =
+               (struct ieee80211_hdr *)&((u8 *)wsm)[txpriv->offset];
+       __le16 fctl = frame->frame_control;
+       enum {
+               do_probe,
+               do_drop,
+               do_wep,
+               do_tx,
+       } action = do_tx;
+
+       switch (priv->mode) {
+       case NL80211_IFTYPE_STATION:
+               if (priv->join_status == CW1200_JOIN_STATUS_MONITOR)
+                       action = do_tx;
+               else if (priv->join_status < CW1200_JOIN_STATUS_PRE_STA)
+                       action = do_drop;
+               break;
+       case NL80211_IFTYPE_AP:
+               if (!priv->join_status) {
+                       action = do_drop;
+               } else if (!(BIT(txpriv->raw_link_id) &
+                            (BIT(0) | priv->link_id_map))) {
+                       wiphy_warn(priv->hw->wiphy,
+                                  "A frame with expired link id is dropped.\n");
+                       action = do_drop;
+               }
+               if (cw1200_queue_get_generation(wsm->packet_id) >
+                               CW1200_MAX_REQUEUE_ATTEMPTS) {
+                       /* HACK!!! WSM324 firmware has a tendency to requeue
+                        * multicast frames in a loop, causing a performance
+                        * drop and high power consumption in the driver.
+                        * In this situation it is better to simply drop
+                        * the problematic frame.
+                        */
+                       wiphy_warn(priv->hw->wiphy,
+                                  "Too many attempts to requeue a frame; dropped.\n");
+                       action = do_drop;
+               }
+               break;
+       case NL80211_IFTYPE_ADHOC:
+               if (priv->join_status != CW1200_JOIN_STATUS_IBSS)
+                       action = do_drop;
+               break;
+       case NL80211_IFTYPE_MESH_POINT:
+               action = do_tx; /* TODO:  Test me! */
+               break;
+       case NL80211_IFTYPE_MONITOR:
+       default:
+               action = do_drop;
+               break;
+       }
+
+       if (action == do_tx) {
+               if (ieee80211_is_nullfunc(fctl)) {
+                       spin_lock(&priv->bss_loss_lock);
+                       if (priv->bss_loss_state) {
+                               priv->bss_loss_confirm_id = wsm->packet_id;
+                               wsm->queue_id = WSM_QUEUE_VOICE;
+                       }
+                       spin_unlock(&priv->bss_loss_lock);
+               } else if (ieee80211_is_probe_req(fctl)) {
+                       action = do_probe;
+               } else if (ieee80211_is_deauth(fctl) &&
+                          priv->mode != NL80211_IFTYPE_AP) {
+                       pr_debug("[WSM] Issue unjoin command due to tx deauth.\n");
+                       wsm_lock_tx_async(priv);
+                       if (queue_work(priv->workqueue,
+                                      &priv->unjoin_work) <= 0)
+                               wsm_unlock_tx(priv);
+               } else if (ieee80211_has_protected(fctl) &&
+                          tx_info->control.hw_key &&
+                          tx_info->control.hw_key->keyidx != priv->wep_default_key_id &&
+                          (tx_info->control.hw_key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+                           tx_info->control.hw_key->cipher == WLAN_CIPHER_SUITE_WEP104)) {
+                       action = do_wep;
+               }
+       }
+
+       switch (action) {
+       case do_probe:
+               /* An interesting FW "feature": the device filters out probe
+                * responses. The easiest way to get them back is to convert
+                * the probe request into a WSM start_scan command.
+                */
+               pr_debug("[WSM] Convert probe request to scan.\n");
+               wsm_lock_tx_async(priv);
+               priv->pending_frame_id = wsm->packet_id;
+               if (queue_delayed_work(priv->workqueue,
+                                      &priv->scan.probe_work, 0) <= 0)
+                       wsm_unlock_tx(priv);
+               handled = true;
+               break;
+       case do_drop:
+               pr_debug("[WSM] Drop frame (0x%.4X).\n", fctl);
+               BUG_ON(cw1200_queue_remove(queue, wsm->packet_id));
+               handled = true;
+               break;
+       case do_wep:
+               pr_debug("[WSM] Issue set_default_wep_key.\n");
+               wsm_lock_tx_async(priv);
+               priv->wep_default_key_id = tx_info->control.hw_key->keyidx;
+               priv->pending_frame_id = wsm->packet_id;
+               if (queue_work(priv->workqueue, &priv->wep_key_work) <= 0)
+                       wsm_unlock_tx(priv);
+               handled = true;
+               break;
+       case do_tx:
+               pr_debug("[WSM] Transmit frame.\n");
+               break;
+       default:
+               /* Do nothing */
+               break;
+       }
+       return handled;
+}
+
+static int cw1200_get_prio_queue(struct cw1200_common *priv,
+                                u32 link_id_map, int *total)
+{
+       static const int urgent = BIT(CW1200_LINK_ID_AFTER_DTIM) |
+               BIT(CW1200_LINK_ID_UAPSD);
+       struct wsm_edca_queue_params *edca;
+       unsigned score, best = -1;
+       int winner = -1;
+       int queued;
+       int i;
+
+       /* search for a winner using edca params */
+       for (i = 0; i < 4; ++i) {
+               queued = cw1200_queue_get_num_queued(&priv->tx_queue[i],
+                               link_id_map);
+               if (!queued)
+                       continue;
+               *total += queued;
+               edca = &priv->edca.params[i];
+               score = ((edca->aifns + edca->cwmin) << 16) +
+                       ((edca->cwmax - edca->cwmin) *
+                        (get_random_int() & 0xFFFF));
+               if (score < best && (winner < 0 || i != 3)) {
+                       best = score;
+                       winner = i;
+               }
+       }
+
+       /* override winner if bursting */
+       if (winner >= 0 && priv->tx_burst_idx >= 0 &&
+           winner != priv->tx_burst_idx &&
+           !cw1200_queue_get_num_queued(
+                   &priv->tx_queue[winner],
+                   link_id_map & urgent) &&
+           cw1200_queue_get_num_queued(
+                   &priv->tx_queue[priv->tx_burst_idx],
+                   link_id_map))
+               winner = priv->tx_burst_idx;
+
+       return winner;
+}
+
+static int wsm_get_tx_queue_and_mask(struct cw1200_common *priv,
+                                    struct cw1200_queue **queue_p,
+                                    u32 *tx_allowed_mask_p,
+                                    bool *more)
+{
+       int idx;
+       u32 tx_allowed_mask;
+       int total = 0;
+
+       /* Search for a queue with multicast frames buffered */
+       if (priv->tx_multicast) {
+               tx_allowed_mask = BIT(CW1200_LINK_ID_AFTER_DTIM);
+               idx = cw1200_get_prio_queue(priv,
+                               tx_allowed_mask, &total);
+               if (idx >= 0) {
+                       *more = total > 1;
+                       goto found;
+               }
+       }
+
+       /* Search for unicast traffic */
+       tx_allowed_mask = ~priv->sta_asleep_mask;
+       tx_allowed_mask |= BIT(CW1200_LINK_ID_UAPSD);
+       if (priv->sta_asleep_mask) {
+               tx_allowed_mask |= priv->pspoll_mask;
+               tx_allowed_mask &= ~BIT(CW1200_LINK_ID_AFTER_DTIM);
+       } else {
+               tx_allowed_mask |= BIT(CW1200_LINK_ID_AFTER_DTIM);
+       }
+       idx = cw1200_get_prio_queue(priv,
+                       tx_allowed_mask, &total);
+       if (idx < 0)
+               return -ENOENT;
+
+found:
+       *queue_p = &priv->tx_queue[idx];
+       *tx_allowed_mask_p = tx_allowed_mask;
+       return 0;
+}
+
+int wsm_get_tx(struct cw1200_common *priv, u8 **data,
+              size_t *tx_len, int *burst)
+{
+       struct wsm_tx *wsm = NULL;
+       struct ieee80211_tx_info *tx_info;
+       struct cw1200_queue *queue = NULL;
+       int queue_num;
+       u32 tx_allowed_mask = 0;
+       const struct cw1200_txpriv *txpriv = NULL;
+       int count = 0;
+
+       /* More is used only for broadcasts. */
+       bool more = false;
+
+       if (priv->wsm_cmd.ptr) { /* CMD request */
+               ++count;
+               spin_lock(&priv->wsm_cmd.lock);
+               BUG_ON(!priv->wsm_cmd.ptr);
+               *data = priv->wsm_cmd.ptr;
+               *tx_len = priv->wsm_cmd.len;
+               *burst = 1;
+               spin_unlock(&priv->wsm_cmd.lock);
+       } else {
+               for (;;) {
+                       int ret;
+
+                       if (atomic_add_return(0, &priv->tx_lock))
+                               break;
+
+                       spin_lock_bh(&priv->ps_state_lock);
+
+                       ret = wsm_get_tx_queue_and_mask(priv, &queue,
+                                                       &tx_allowed_mask, &more);
+                       queue_num = queue - priv->tx_queue;
+
+                       if (priv->buffered_multicasts &&
+                           (ret || !more) &&
+                           (priv->tx_multicast || !priv->sta_asleep_mask)) {
+                               priv->buffered_multicasts = false;
+                               if (priv->tx_multicast) {
+                                       priv->tx_multicast = false;
+                                       queue_work(priv->workqueue,
+                                                  &priv->multicast_stop_work);
+                               }
+                       }
+
+                       spin_unlock_bh(&priv->ps_state_lock);
+
+                       if (ret)
+                               break;
+
+                       if (cw1200_queue_get(queue,
+                                            tx_allowed_mask,
+                                            &wsm, &tx_info, &txpriv))
+                               continue;
+
+                       if (wsm_handle_tx_data(priv, wsm,
+                                              tx_info, txpriv, queue))
+                               continue;  /* Handled by WSM */
+
+                       wsm->hdr.id &= __cpu_to_le16(
+                               ~WSM_TX_LINK_ID(WSM_TX_LINK_ID_MAX));
+                       wsm->hdr.id |= cpu_to_le16(
+                               WSM_TX_LINK_ID(txpriv->raw_link_id));
+                       priv->pspoll_mask &= ~BIT(txpriv->raw_link_id);
+
+                       *data = (u8 *)wsm;
+                       *tx_len = __le16_to_cpu(wsm->hdr.len);
+
+                       /* allow bursting if txop is set */
+                       if (priv->edca.params[queue_num].txop_limit)
+                               *burst = min(*burst,
+                                            (int)cw1200_queue_get_num_queued(queue, tx_allowed_mask) + 1);
+                       else
+                               *burst = 1;
+
+                       /* store index of bursting queue */
+                       if (*burst > 1)
+                               priv->tx_burst_idx = queue_num;
+                       else
+                               priv->tx_burst_idx = -1;
+
+                       if (more) {
+                               struct ieee80211_hdr *hdr =
+                                       (struct ieee80211_hdr *)
+                                       &((u8 *)wsm)[txpriv->offset];
+                               /* more buffered multicast/broadcast frames
+                                *  ==> set MoreData flag in IEEE 802.11 header
+                                *  to inform PS STAs
+                                */
+                               hdr->frame_control |=
+                                       cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+                       }
+
+                       pr_debug("[WSM] >>> 0x%.4X (%zu) %p %c\n",
+                                0x0004, *tx_len, *data,
+                                wsm->more ? 'M' : ' ');
+                       ++count;
+                       break;
+               }
+       }
+
+       return count;
+}
+
+void wsm_txed(struct cw1200_common *priv, u8 *data)
+{
+       if (data == priv->wsm_cmd.ptr) {
+               spin_lock(&priv->wsm_cmd.lock);
+               priv->wsm_cmd.ptr = NULL;
+               spin_unlock(&priv->wsm_cmd.lock);
+       }
+}
+
+/* ******************************************************************** */
+/* WSM buffer                                                          */
+
+void wsm_buf_init(struct wsm_buf *buf)
+{
+       BUG_ON(buf->begin);
+       buf->begin = kmalloc(FWLOAD_BLOCK_SIZE, GFP_KERNEL | GFP_DMA);
+       buf->end = buf->begin ? &buf->begin[FWLOAD_BLOCK_SIZE] : buf->begin;
+       wsm_buf_reset(buf);
+}
+
+void wsm_buf_deinit(struct wsm_buf *buf)
+{
+       kfree(buf->begin);
+       buf->begin = buf->data = buf->end = NULL;
+}
+
+static void wsm_buf_reset(struct wsm_buf *buf)
+{
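+       /* Leave 4 bytes of headroom at the start of the buffer
+        * (enough for the WSM header) and clear them.
+        */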
+       if (buf->begin) {
+               buf->data = &buf->begin[4];
+               *(u32 *)buf->begin = 0;
+       } else {
+               buf->data = buf->begin;
+       }
+}
+
+static int wsm_buf_reserve(struct wsm_buf *buf, size_t extra_size)
+{
+       size_t pos = buf->data - buf->begin;
+       size_t size = pos + extra_size;
+
+       size = round_up(size, FWLOAD_BLOCK_SIZE);
+
+       buf->begin = krealloc(buf->begin, size, GFP_KERNEL | GFP_DMA);
+       if (buf->begin) {
+               buf->data = &buf->begin[pos];
+               buf->end = &buf->begin[size];
+               return 0;
+       } else {
+               buf->end = buf->data = buf->begin;
+               return -ENOMEM;
+       }
+}
diff --git a/drivers/net/wireless/cw1200/wsm.h b/drivers/net/wireless/cw1200/wsm.h
new file mode 100644 (file)
index 0000000..7afc613
--- /dev/null
@@ -0,0 +1,1870 @@
+/*
+ * WSM host interface (HI) for ST-Ericsson CW1200 mac80211 drivers
+ *
+ * Copyright (c) 2010, ST-Ericsson
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *
+ * Based on CW1200 UMAC WSM API, which is
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Stewart Mathers <stewart.mathers@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef CW1200_WSM_H_INCLUDED
+#define CW1200_WSM_H_INCLUDED
+
+#include <linux/spinlock.h>
+
+struct cw1200_common;
+
+/* Bands */
+/* Radio band 2.412 -2.484 GHz. */
+#define WSM_PHY_BAND_2_4G              (0)
+
+/* Radio band 4.9375-5.8250 GHz. */
+#define WSM_PHY_BAND_5G                        (1)
+
+/* Transmit rates */
+/* 1   Mbps            ERP-DSSS */
+#define WSM_TRANSMIT_RATE_1            (0)
+
+/* 2   Mbps            ERP-DSSS */
+#define WSM_TRANSMIT_RATE_2            (1)
+
+/* 5.5 Mbps            ERP-CCK */
+#define WSM_TRANSMIT_RATE_5            (2)
+
+/* 11  Mbps            ERP-CCK */
+#define WSM_TRANSMIT_RATE_11           (3)
+
+/* 22  Mbps            ERP-PBCC (Not supported) */
+/* #define WSM_TRANSMIT_RATE_22                (4) */
+
+/* 33  Mbps            ERP-PBCC (Not supported) */
+/* #define WSM_TRANSMIT_RATE_33                (5) */
+
+/* 6   Mbps   (3 Mbps) ERP-OFDM, BPSK coding rate 1/2 */
+#define WSM_TRANSMIT_RATE_6            (6)
+
+/* 9   Mbps (4.5 Mbps) ERP-OFDM, BPSK coding rate 3/4 */
+#define WSM_TRANSMIT_RATE_9            (7)
+
+/* 12  Mbps  (6 Mbps)  ERP-OFDM, QPSK coding rate 1/2 */
+#define WSM_TRANSMIT_RATE_12           (8)
+
+/* 18  Mbps  (9 Mbps)  ERP-OFDM, QPSK coding rate 3/4 */
+#define WSM_TRANSMIT_RATE_18           (9)
+
+/* 24  Mbps (12 Mbps)  ERP-OFDM, 16QAM coding rate 1/2 */
+#define WSM_TRANSMIT_RATE_24           (10)
+
+/* 36  Mbps (18 Mbps)  ERP-OFDM, 16QAM coding rate 3/4 */
+#define WSM_TRANSMIT_RATE_36           (11)
+
+/* 48  Mbps (24 Mbps)  ERP-OFDM, 64QAM coding rate 1/2 */
+#define WSM_TRANSMIT_RATE_48           (12)
+
+/* 54  Mbps (27 Mbps)  ERP-OFDM, 64QAM coding rate 3/4 */
+#define WSM_TRANSMIT_RATE_54           (13)
+
+/* 6.5 Mbps            HT-OFDM, BPSK coding rate 1/2 */
+#define WSM_TRANSMIT_RATE_HT_6         (14)
+
+/* 13  Mbps            HT-OFDM, QPSK coding rate 1/2 */
+#define WSM_TRANSMIT_RATE_HT_13                (15)
+
+/* 19.5 Mbps           HT-OFDM, QPSK coding rate 3/4 */
+#define WSM_TRANSMIT_RATE_HT_19                (16)
+
+/* 26  Mbps            HT-OFDM, 16QAM coding rate 1/2 */
+#define WSM_TRANSMIT_RATE_HT_26                (17)
+
+/* 39  Mbps            HT-OFDM, 16QAM coding rate 3/4 */
+#define WSM_TRANSMIT_RATE_HT_39                (18)
+
+/* 52  Mbps            HT-OFDM, 64QAM coding rate 2/3 */
+#define WSM_TRANSMIT_RATE_HT_52                (19)
+
+/* 58.5 Mbps           HT-OFDM, 64QAM coding rate 3/4 */
+#define WSM_TRANSMIT_RATE_HT_58                (20)
+
+/* 65  Mbps            HT-OFDM, 64QAM coding rate 5/6 */
+#define WSM_TRANSMIT_RATE_HT_65                (21)
+
+/* Scan types */
+/* Foreground scan */
+#define WSM_SCAN_TYPE_FOREGROUND       (0)
+
+/* Background scan */
+#define WSM_SCAN_TYPE_BACKGROUND       (1)
+
+/* Auto scan */
+#define WSM_SCAN_TYPE_AUTO             (2)
+
+/* Scan flags */
+/* Forced background scan means that if the station cannot */
+/* enter power-save mode, it shall be forced to perform a */
+/* background scan. Only valid when ScanType is */
+/* background scan. */
+#define WSM_SCAN_FLAG_FORCE_BACKGROUND (BIT(0))
+
+/* The WLAN device scans one channel at a time so */
+/* that disturbance to the data traffic is minimized. */
+#define WSM_SCAN_FLAG_SPLIT_METHOD     (BIT(1))
+
+/* Preamble Type. Long if not set. */
+#define WSM_SCAN_FLAG_SHORT_PREAMBLE   (BIT(2))
+
+/* 11n Tx Mode. Mixed if not set. */
+#define WSM_SCAN_FLAG_11N_GREENFIELD   (BIT(3))
+
+/* Scan constraints */
+/* Maximum number of channels to be scanned. */
+#define WSM_SCAN_MAX_NUM_OF_CHANNELS   (48)
+
+/* The maximum number of SSIDs that the device can scan for. */
+#define WSM_SCAN_MAX_NUM_OF_SSIDS      (2)
+
+/* Power management modes */
+/* 802.11 Active mode */
+#define WSM_PSM_ACTIVE                 (0)
+
+/* 802.11 PS mode */
+#define WSM_PSM_PS                     BIT(0)
+
+/* Fast Power Save bit */
+#define WSM_PSM_FAST_PS_FLAG           BIT(7)
+
+/* Dynamic aka Fast power save */
+#define WSM_PSM_FAST_PS                        (BIT(0) | BIT(7))
+
+/* Undetermined */
+/* Note : Undetermined status is reported when the */
+/* NULL data frame used to advertise the PM mode to */
+/* the AP at Pre or Post Background Scan is not Acknowledged */
+#define WSM_PSM_UNKNOWN                        BIT(1)
+
+/* Queue IDs */
+/* best effort/legacy */
+#define WSM_QUEUE_BEST_EFFORT          (0)
+
+/* background */
+#define WSM_QUEUE_BACKGROUND           (1)
+
+/* video */
+#define WSM_QUEUE_VIDEO                        (2)
+
+/* voice */
+#define WSM_QUEUE_VOICE                        (3)
+
+/* HT TX parameters */
+/* Non-HT */
+#define WSM_HT_TX_NON_HT               (0)
+
+/* Mixed format */
+#define WSM_HT_TX_MIXED                        (1)
+
+/* Greenfield format */
+#define WSM_HT_TX_GREENFIELD           (2)
+
+/* STBC allowed */
+#define WSM_HT_TX_STBC                 (BIT(7))
+
+/* EPTA priority flags for BT Coex */
+/* default epta priority */
+#define WSM_EPTA_PRIORITY_DEFAULT      4
+/* use for normal data */
+#define WSM_EPTA_PRIORITY_DATA         4
+/* use for connect/disconnect/roaming*/
+#define WSM_EPTA_PRIORITY_MGT          5
+/* use for action frames */
+#define WSM_EPTA_PRIORITY_ACTION       5
+/* use for AC_VI data */
+#define WSM_EPTA_PRIORITY_VIDEO                5
+/* use for AC_VO data */
+#define WSM_EPTA_PRIORITY_VOICE                6
+/* use for EAPOL exchange */
+#define WSM_EPTA_PRIORITY_EAPOL                7
+
+/* TX status */
+/* Frame was sent aggregated */
+/* Only valid for WSM_SUCCESS status. */
+#define WSM_TX_STATUS_AGGREGATION      (BIT(0))
+
+/* Host should requeue this frame later. */
+/* Valid only when status is WSM_REQUEUE. */
+#define WSM_TX_STATUS_REQUEUE          (BIT(1))
+
+/* Normal Ack */
+#define WSM_TX_STATUS_NORMAL_ACK       (0<<2)
+
+/* No Ack */
+#define WSM_TX_STATUS_NO_ACK           (1<<2)
+
+/* No explicit acknowledgement */
+#define WSM_TX_STATUS_NO_EXPLICIT_ACK  (2<<2)
+
+/* Block Ack */
+/* Only valid for WSM_SUCCESS status. */
+#define WSM_TX_STATUS_BLOCK_ACK                (3<<2)
+
+/* RX status */
+/* Unencrypted */
+#define WSM_RX_STATUS_UNENCRYPTED      (0<<0)
+
+/* WEP */
+#define WSM_RX_STATUS_WEP              (1<<0)
+
+/* TKIP */
+#define WSM_RX_STATUS_TKIP             (2<<0)
+
+/* AES */
+#define WSM_RX_STATUS_AES              (3<<0)
+
+/* WAPI */
+#define WSM_RX_STATUS_WAPI             (4<<0)
+
+/* Macro to fetch encryption subfield. */
+#define WSM_RX_STATUS_ENCRYPTION(status) ((status) & 0x07)
+
+/* Frame was part of an aggregation */
+#define WSM_RX_STATUS_AGGREGATE                (BIT(3))
+
+/* Frame was first in the aggregation */
+#define WSM_RX_STATUS_AGGREGATE_FIRST  (BIT(4))
+
+/* Frame was last in the aggregation */
+#define WSM_RX_STATUS_AGGREGATE_LAST   (BIT(5))
+
+/* Indicates a defragmented frame */
+#define WSM_RX_STATUS_DEFRAGMENTED     (BIT(6))
+
+/* Indicates a Beacon frame */
+#define WSM_RX_STATUS_BEACON           (BIT(7))
+
+/* Indicates the STA bit in the beacon TIM field */
+#define WSM_RX_STATUS_TIM              (BIT(8))
+
+/* Indicates Beacon frame's virtual bitmap contains multicast bit */
+#define WSM_RX_STATUS_MULTICAST                (BIT(9))
+
+/* Indicates frame contains a matching SSID */
+#define WSM_RX_STATUS_MATCHING_SSID    (BIT(10))
+
+/* Indicates frame contains a matching BSSI */
+#define WSM_RX_STATUS_MATCHING_BSSI    (BIT(11))
+
+/* Indicates More bit set in Framectl field */
+#define WSM_RX_STATUS_MORE_DATA                (BIT(12))
+
+/* Indicates frame received during a measurement process */
+#define WSM_RX_STATUS_MEASUREMENT      (BIT(13))
+
+/* Indicates frame received as an HT packet */
+#define WSM_RX_STATUS_HT               (BIT(14))
+
+/* Indicates frame received with STBC */
+#define WSM_RX_STATUS_STBC             (BIT(15))
+
+/* Indicates Address 1 field matches dot11StationId */
+#define WSM_RX_STATUS_ADDRESS1         (BIT(16))
+
+/* Indicates Group address present in the Address 1 field */
+#define WSM_RX_STATUS_GROUP            (BIT(17))
+
+/* Indicates Broadcast address present in the Address 1 field */
+#define WSM_RX_STATUS_BROADCAST                (BIT(18))
+
+/* Indicates group key used with encrypted frames */
+#define WSM_RX_STATUS_GROUP_KEY                (BIT(19))
+
+/* Macro to fetch encryption key index. */
+#define WSM_RX_STATUS_KEY_IDX(status)  (((status >> 20)) & 0x0F)
+
+/* Indicates TSF inclusion after 802.11 frame body */
+#define WSM_RX_STATUS_TSF_INCLUDED     (BIT(24))
+
+/* Frame Control field starts at Frame offset + 2 */
+#define WSM_TX_2BYTES_SHIFT            (BIT(7))
+
+/* Join mode */
+/* IBSS */
+#define WSM_JOIN_MODE_IBSS             (0)
+
+/* BSS */
+#define WSM_JOIN_MODE_BSS              (1)
+
+/* PLCP preamble type */
+/* For long preamble */
+#define WSM_JOIN_PREAMBLE_LONG         (0)
+
+/* For short preamble (Long for 1Mbps) */
+#define WSM_JOIN_PREAMBLE_SHORT                (1)
+
+/* For short preamble (Long for 1 and 2Mbps) */
+#define WSM_JOIN_PREAMBLE_SHORT_2      (2)
+
+/* Join flags */
+/* Unsynchronized */
+#define WSM_JOIN_FLAGS_UNSYNCRONIZED   BIT(0)
+/* The BSS owner is a P2P GO */
+#define WSM_JOIN_FLAGS_P2P_GO          BIT(1)
+/* Force to join BSS with the BSSID and the
+ * SSID specified without waiting for beacons. The
+ * ProbeForJoin parameter is ignored.
+ */
+#define WSM_JOIN_FLAGS_FORCE           BIT(2)
+/* Give probe request/response higher
+ * priority over the BT traffic
+ */
+#define WSM_JOIN_FLAGS_PRIO            BIT(3)
+/* Issue immediate join confirmation and use
+ * join complete to notify about completion
+ */
+#define WSM_JOIN_FLAGS_FORCE_WITH_COMPLETE_IND BIT(5)
+
+/* Key types */
+#define WSM_KEY_TYPE_WEP_DEFAULT       (0)
+#define WSM_KEY_TYPE_WEP_PAIRWISE      (1)
+#define WSM_KEY_TYPE_TKIP_GROUP                (2)
+#define WSM_KEY_TYPE_TKIP_PAIRWISE     (3)
+#define WSM_KEY_TYPE_AES_GROUP         (4)
+#define WSM_KEY_TYPE_AES_PAIRWISE      (5)
+#define WSM_KEY_TYPE_WAPI_GROUP                (6)
+#define WSM_KEY_TYPE_WAPI_PAIRWISE     (7)
+
+/* Key indexes */
+#define WSM_KEY_MAX_INDEX              (10)
+
+/* ACK policy */
+#define WSM_ACK_POLICY_NORMAL          (0)
+#define WSM_ACK_POLICY_NO_ACK          (1)
+
+/* Start modes */
+#define WSM_START_MODE_AP              (0)     /* Mini AP */
+#define WSM_START_MODE_P2P_GO          (1)     /* P2P GO */
+#define WSM_START_MODE_P2P_DEV         (2)     /* P2P device */
+
+/* SetAssociationMode MIB flags */
+#define WSM_ASSOCIATION_MODE_USE_PREAMBLE_TYPE         (BIT(0))
+#define WSM_ASSOCIATION_MODE_USE_HT_MODE               (BIT(1))
+#define WSM_ASSOCIATION_MODE_USE_BASIC_RATE_SET                (BIT(2))
+#define WSM_ASSOCIATION_MODE_USE_MPDU_START_SPACING    (BIT(3))
+#define WSM_ASSOCIATION_MODE_SNOOP_ASSOC_FRAMES                (BIT(4))
+
+/* RcpiRssiThreshold MIB flags */
+#define WSM_RCPI_RSSI_THRESHOLD_ENABLE (BIT(0))
+#define WSM_RCPI_RSSI_USE_RSSI         (BIT(1))
+#define WSM_RCPI_RSSI_DONT_USE_UPPER   (BIT(2))
+#define WSM_RCPI_RSSI_DONT_USE_LOWER   (BIT(3))
+
+/* Update-ie constants */
+#define WSM_UPDATE_IE_BEACON           (BIT(0))
+#define WSM_UPDATE_IE_PROBE_RESP       (BIT(1))
+#define WSM_UPDATE_IE_PROBE_REQ                (BIT(2))
+
+/* WSM events */
+/* Error */
+#define WSM_EVENT_ERROR                        (0)
+
+/* BSS lost */
+#define WSM_EVENT_BSS_LOST             (1)
+
+/* BSS regained */
+#define WSM_EVENT_BSS_REGAINED         (2)
+
+/* Radar detected */
+#define WSM_EVENT_RADAR_DETECTED       (3)
+
+/* RCPI or RSSI threshold triggered */
+#define WSM_EVENT_RCPI_RSSI            (4)
+
+/* BT inactive */
+#define WSM_EVENT_BT_INACTIVE          (5)
+
+/* BT active */
+#define WSM_EVENT_BT_ACTIVE            (6)
+
+/* MIB IDs */
+/* 4.1  dot11StationId */
+#define WSM_MIB_ID_DOT11_STATION_ID            0x0000
+
+/* 4.2  dot11MaxTransmitMsduLifeTime */
+#define WSM_MIB_ID_DOT11_MAX_TRANSMIT_LIFTIME  0x0001
+
+/* 4.3  dot11MaxReceiveLifeTime */
+#define WSM_MIB_ID_DOT11_MAX_RECEIVE_LIFETIME  0x0002
+
+/* 4.4  dot11SlotTime */
+#define WSM_MIB_ID_DOT11_SLOT_TIME             0x0003
+
+/* 4.5  dot11GroupAddressesTable */
+#define WSM_MIB_ID_DOT11_GROUP_ADDRESSES_TABLE 0x0004
+#define WSM_MAX_GRP_ADDRTABLE_ENTRIES          8
+
+/* 4.6  dot11WepDefaultKeyId */
+#define WSM_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID    0x0005
+
+/* 4.7  dot11CurrentTxPowerLevel */
+#define WSM_MIB_ID_DOT11_CURRENT_TX_POWER_LEVEL        0x0006
+
+/* 4.8  dot11RTSThreshold */
+#define WSM_MIB_ID_DOT11_RTS_THRESHOLD         0x0007
+
+/* 4.9  NonErpProtection */
+#define WSM_MIB_ID_NON_ERP_PROTECTION          0x1000
+
+/* 4.10 ArpIpAddressesTable */
+#define WSM_MIB_ID_ARP_IP_ADDRESSES_TABLE      0x1001
+#define WSM_MAX_ARP_IP_ADDRTABLE_ENTRIES       1
+
+/* 4.11 TemplateFrame */
+#define WSM_MIB_ID_TEMPLATE_FRAME              0x1002
+
+/* 4.12 RxFilter */
+#define WSM_MIB_ID_RX_FILTER                   0x1003
+
+/* 4.13 BeaconFilterTable */
+#define WSM_MIB_ID_BEACON_FILTER_TABLE         0x1004
+
+/* 4.14 BeaconFilterEnable */
+#define WSM_MIB_ID_BEACON_FILTER_ENABLE                0x1005
+
+/* 4.15 OperationalPowerMode */
+#define WSM_MIB_ID_OPERATIONAL_POWER_MODE      0x1006
+
+/* 4.16 BeaconWakeUpPeriod */
+#define WSM_MIB_ID_BEACON_WAKEUP_PERIOD                0x1007
+
+/* 4.17 RcpiRssiThreshold */
+#define WSM_MIB_ID_RCPI_RSSI_THRESHOLD         0x1009
+
+/* 4.18 StatisticsTable */
+#define WSM_MIB_ID_STATISTICS_TABLE            0x100A
+
+/* 4.19 IbssPsConfig */
+#define WSM_MIB_ID_IBSS_PS_CONFIG              0x100B
+
+/* 4.20 CountersTable */
+#define WSM_MIB_ID_COUNTERS_TABLE              0x100C
+
+/* 4.21 BlockAckPolicy */
+#define WSM_MIB_ID_BLOCK_ACK_POLICY            0x100E
+
+/* 4.22 OverrideInternalTxRate */
+#define WSM_MIB_ID_OVERRIDE_INTERNAL_TX_RATE   0x100F
+
+/* 4.23 SetAssociationMode */
+#define WSM_MIB_ID_SET_ASSOCIATION_MODE                0x1010
+
+/* 4.24 UpdateEptaConfigData */
+#define WSM_MIB_ID_UPDATE_EPTA_CONFIG_DATA     0x1011
+
+/* 4.25 SelectCcaMethod */
+#define WSM_MIB_ID_SELECT_CCA_METHOD           0x1012
+
+/* 4.26 SetUapsdInformation */
+#define WSM_MIB_ID_SET_UAPSD_INFORMATION       0x1013
+
+/* 4.27 SetAutoCalibrationMode  WBF00004073 */
+#define WSM_MIB_ID_SET_AUTO_CALIBRATION_MODE   0x1015
+
+/* 4.28 SetTxRateRetryPolicy */
+#define WSM_MIB_ID_SET_TX_RATE_RETRY_POLICY    0x1016
+
+/* 4.29 SetHostMessageTypeFilter */
+#define WSM_MIB_ID_SET_HOST_MSG_TYPE_FILTER    0x1017
+
+/* 4.30 P2PFindInfo */
+#define WSM_MIB_ID_P2P_FIND_INFO               0x1018
+
+/* 4.31 P2PPsModeInfo */
+#define WSM_MIB_ID_P2P_PS_MODE_INFO            0x1019
+
+/* 4.32 SetEtherTypeDataFrameFilter */
+#define WSM_MIB_ID_SET_ETHERTYPE_DATAFRAME_FILTER 0x101A
+
+/* 4.33 SetUDPPortDataFrameFilter */
+#define WSM_MIB_ID_SET_UDPPORT_DATAFRAME_FILTER        0x101B
+
+/* 4.34 SetMagicDataFrameFilter */
+#define WSM_MIB_ID_SET_MAGIC_DATAFRAME_FILTER  0x101C
+
+/* 4.35 P2PDeviceInfo */
+#define WSM_MIB_ID_P2P_DEVICE_INFO             0x101D
+
+/* 4.36 SetWCDMABand */
+#define WSM_MIB_ID_SET_WCDMA_BAND              0x101E
+
+/* 4.37 GroupTxSequenceCounter */
+#define WSM_MIB_ID_GRP_SEQ_COUNTER             0x101F
+
+/* 4.38 ProtectedMgmtPolicy */
+#define WSM_MIB_ID_PROTECTED_MGMT_POLICY       0x1020
+
+/* 4.39 SetHtProtection */
+#define WSM_MIB_ID_SET_HT_PROTECTION           0x1021
+
+/* 4.40 GPIO Command */
+#define WSM_MIB_ID_GPIO_COMMAND                        0x1022
+
+/* 4.41 TSF Counter Value */
+#define WSM_MIB_ID_TSF_COUNTER                 0x1023
+
+/* Test Purposes Only */
+#define WSM_MIB_ID_BLOCK_ACK_INFO              0x100D
+
+/* 4.42 UseMultiTxConfMessage */
+#define WSM_MIB_USE_MULTI_TX_CONF              0x1024
+
+/* 4.43 Keep-alive period */
+#define WSM_MIB_ID_KEEP_ALIVE_PERIOD           0x1025
+
+/* 4.44 Disable BSSID filter */
+#define WSM_MIB_ID_DISABLE_BSSID_FILTER                0x1026
+
+/* Frame template types */
+#define WSM_FRAME_TYPE_PROBE_REQUEST   (0)
+#define WSM_FRAME_TYPE_BEACON          (1)
+#define WSM_FRAME_TYPE_NULL            (2)
+#define WSM_FRAME_TYPE_QOS_NULL                (3)
+#define WSM_FRAME_TYPE_PS_POLL         (4)
+#define WSM_FRAME_TYPE_PROBE_RESPONSE  (5)
+
+#define WSM_FRAME_GREENFIELD           (0x80)  /* See 4.11 */
+
+/* Status */
+/* The WSM firmware has completed a request */
+/* successfully. */
+#define WSM_STATUS_SUCCESS              (0)
+
+/* This is a generic failure code if other error codes do */
+/* not apply. */
+#define WSM_STATUS_FAILURE              (1)
+
+/* A request contains one or more invalid parameters. */
+#define WSM_INVALID_PARAMETER           (2)
+
+/* The request cannot be performed because the device is */
+/* in an inappropriate mode. */
+#define WSM_ACCESS_DENIED               (3)
+
+/* The frame received includes a decryption error. */
+#define WSM_STATUS_DECRYPTFAILURE       (4)
+
+/* A MIC failure is detected in the received packets. */
+#define WSM_STATUS_MICFAILURE           (5)
+
+/* The transmit request failed due to retry limit being */
+/* exceeded. */
+#define WSM_STATUS_RETRY_EXCEEDED       (6)
+
+/* The transmit request failed due to MSDU life time */
+/* being exceeded. */
+#define WSM_STATUS_TX_LIFETIME_EXCEEDED (7)
+
+/* The link to the AP is lost. */
+#define WSM_STATUS_LINK_LOST            (8)
+
+/* No key was found for the encrypted frame */
+#define WSM_STATUS_NO_KEY_FOUND         (9)
+
+/* Jammer was detected when transmitting this frame */
+#define WSM_STATUS_JAMMER_DETECTED      (10)
+
+/* The message should be requeued later. */
+/* This is applicable only to Transmit */
+#define WSM_REQUEUE                     (11)
+
+/* Advanced filtering options */
+#define WSM_MAX_FILTER_ELEMENTS                (4)
+
+#define WSM_FILTER_ACTION_IGNORE       (0)
+#define WSM_FILTER_ACTION_FILTER_IN    (1)
+#define WSM_FILTER_ACTION_FILTER_OUT   (2)
+
+#define WSM_FILTER_PORT_TYPE_DST       (0)
+#define WSM_FILTER_PORT_TYPE_SRC       (1)
+
+/* Actual header of WSM messages */
+struct wsm_hdr {
+       __le16 len;
+       __le16 id;
+};
+
+#define WSM_TX_SEQ_MAX                 (7)
+#define WSM_TX_SEQ(seq)                        \
+               ((seq & WSM_TX_SEQ_MAX) << 13)
+#define WSM_TX_LINK_ID_MAX             (0x0F)
+#define WSM_TX_LINK_ID(link_id)                \
+               ((link_id & WSM_TX_LINK_ID_MAX) << 6)
+
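+/* Illustrative example: WSM_TX_SEQ(3) | WSM_TX_LINK_ID(2) ==
+ * (3 << 13) | (2 << 6) == 0x6080; these values are OR'ed into the
+ * little-endian wsm_hdr.id field.
+ */
+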
+#define MAX_BEACON_SKIP_TIME_MS 1000
+
+#define WSM_CMD_LAST_CHANCE_TIMEOUT (HZ * 3 / 2)
+
+/* ******************************************************************** */
+/* WSM capability                                                      */
+
+#define WSM_STARTUP_IND_ID 0x0801
+
+struct wsm_startup_ind {
+       u16 input_buffers;
+       u16 input_buffer_size;
+       u16 status;
+       u16 hw_id;
+       u16 hw_subid;
+       u16 fw_cap;
+       u16 fw_type;
+       u16 fw_api;
+       u16 fw_build;
+       u16 fw_ver;
+       char fw_label[128];
+       u32 config[4];
+};
+
+/* ******************************************************************** */
+/* WSM commands                                                                */
+
+/* 3.1 */
+#define WSM_CONFIGURATION_REQ_ID 0x0009
+#define WSM_CONFIGURATION_RESP_ID 0x0409
+
+struct wsm_tx_power_range {
+       int min_power_level;
+       int max_power_level;
+       u32 stepping;
+};
+
+struct wsm_configuration {
+       /* [in] */ u32 dot11MaxTransmitMsduLifeTime;
+       /* [in] */ u32 dot11MaxReceiveLifeTime;
+       /* [in] */ u32 dot11RtsThreshold;
+       /* [in, out] */ u8 *dot11StationId;
+       /* [in] */ const void *dpdData;
+       /* [in] */ size_t dpdData_size;
+       /* [out] */ u8 dot11FrequencyBandsSupported;
+       /* [out] */ u32 supportedRateMask;
+       /* [out] */ struct wsm_tx_power_range txPowerRange[2];
+};
+
+int wsm_configuration(struct cw1200_common *priv,
+                     struct wsm_configuration *arg);
+
+/* 3.3 */
+#define WSM_RESET_REQ_ID 0x000A
+#define WSM_RESET_RESP_ID 0x040A
+struct wsm_reset {
+       /* [in] */ int link_id;
+       /* [in] */ bool reset_statistics;
+};
+
+int wsm_reset(struct cw1200_common *priv, const struct wsm_reset *arg);
+
+/* 3.5 */
+#define WSM_READ_MIB_REQ_ID 0x0005
+#define WSM_READ_MIB_RESP_ID 0x0405
+int wsm_read_mib(struct cw1200_common *priv, u16 mib_id, void *buf,
+                size_t buf_size);
+
+/* 3.7 */
+#define WSM_WRITE_MIB_REQ_ID 0x0006
+#define WSM_WRITE_MIB_RESP_ID 0x0406
+int wsm_write_mib(struct cw1200_common *priv, u16 mib_id, void *buf,
+                 size_t buf_size);
+
+/* 3.9 */
+#define WSM_START_SCAN_REQ_ID 0x0007
+#define WSM_START_SCAN_RESP_ID 0x0407
+
+struct wsm_ssid {
+       u8 ssid[32];
+       u32 length;
+};
+
+struct wsm_scan_ch {
+       u16 number;
+       u32 min_chan_time;
+       u32 max_chan_time;
+       u32 tx_power_level;
+};
+
+struct wsm_scan {
+       /* WSM_PHY_BAND_... */
+       u8 band;
+
+       /* WSM_SCAN_TYPE_... */
+       u8 type;
+
+       /* WSM_SCAN_FLAG_... */
+       u8 flags;
+
+       /* WSM_TRANSMIT_RATE_... */
+       u8 max_tx_rate;
+
+       /* Interval period in TUs at which the device shall re-execute */
+       /* the requested scan. Max value supported by the device */
+       /* is 256s. */
+       u32 auto_scan_interval;
+
+       /* Number of probe requests (per SSID) sent to one (1) */
+       /* channel. Zero (0) means that none is sent, which */
+       /* means that a passive scan is to be done. Value */
+       /* greater than zero (0) means that an active scan is to */
+       /* be done. */
+       u32 num_probes;
+
+       /* Number of channels to be scanned. */
+       /* Maximum value is WSM_SCAN_MAX_NUM_OF_CHANNELS. */
+       u8 num_channels;
+
+       /* Number of SSIDs provided in the scan command (this */
+       /* is zero (0) in a broadcast scan). */
+       /* The maximum number of SSIDs is WSM_SCAN_MAX_NUM_OF_SSIDS. */
+       u8 num_ssids;
+
+       /* The delay period (in microseconds) */
+       /* before sending a probe-request. */
+       u8 probe_delay;
+
+       /* SSIDs to be scanned [numOfSSIDs]; */
+       struct wsm_ssid *ssids;
+
+       /* Channels to be scanned [numOfChannels]; */
+       struct wsm_scan_ch *ch;
+};
+
+int wsm_scan(struct cw1200_common *priv, const struct wsm_scan *arg);
+
+/* 3.11 */
+#define WSM_STOP_SCAN_REQ_ID 0x0008
+#define WSM_STOP_SCAN_RESP_ID 0x0408
+int wsm_stop_scan(struct cw1200_common *priv);
+
+/* 3.13 */
+#define WSM_SCAN_COMPLETE_IND_ID 0x0806
+struct wsm_scan_complete {
+       /* WSM_STATUS_... */
+       u32 status;
+
+       /* WSM_PSM_... */
+       u8 psm;
+
+       /* Number of channels that the scan operation completed. */
+       u8 num_channels;
+};
+
+/* 3.14 */
+#define WSM_TX_CONFIRM_IND_ID 0x0404
+#define WSM_MULTI_TX_CONFIRM_ID 0x041E
+
+struct wsm_tx_confirm {
+       /* Packet identifier used in wsm_tx. */
+       u32 packet_id;
+
+       /* WSM_STATUS_... */
+       u32 status;
+
+       /* WSM_TRANSMIT_RATE_... */
+       u8 tx_rate;
+
+       /* The number of times the frame was transmitted */
+       /* without receiving an acknowledgement. */
+       u8 ack_failures;
+
+       /* WSM_TX_STATUS_... */
+       u16 flags;
+
+       /* The total time in microseconds that the frame spent in */
+       /* the WLAN device before transmission was completed. */
+       u32 media_delay;
+
+       /* The total time in microseconds that the frame spent in */
+       /* the WLAN device before transmission was started. */
+       u32 tx_queue_delay;
+};
+
+/* 3.15 */
+typedef void (*wsm_tx_confirm_cb) (struct cw1200_common *priv,
+                                  struct wsm_tx_confirm *arg);
+
+/* Note that the wsm_tx struct follows a different convention from the rest
+ * of the WSM API. wsm_hdr is /not/ a caller-adapted struct to be used as an
+ * input argument for a WSM call, but a prepared bytestream to be sent to
+ * firmware. It is filled partly in cw1200_tx and partly in low-level WSM
+ * code. Please pay attention: the convention is different.
+ *
+ * Legend:
+ * - [in]: cw1200_tx must fill this field.
+ * - [wsm]: the field is filled by low-level WSM.
+ */
+struct wsm_tx {
+       /* common WSM header */
+       struct wsm_hdr hdr;
+
+       /* Packet identifier meant to be used in the completion. */
+       u32 packet_id;  /* Note this is actually a cookie */
+
+       /* WSM_TRANSMIT_RATE_... */
+       u8 max_tx_rate;
+
+       /* WSM_QUEUE_... */
+       u8 queue_id;
+
+       /* True: another packet is pending on the host for transmission. */
+       u8 more;
+
+       /* Bit 0 = 0 - Start expiry time from first Tx attempt (default) */
+       /* Bit 0 = 1 - Start expiry time from receipt of Tx Request */
+       /* Bits 3:1  - PTA Priority */
+       /* Bits 6:4  - Tx Rate Retry Policy */
+       /* Bit 7 - Reserved */
+       u8 flags;
+
+       /* Should be 0. */
+       u32 reserved;
+
+       /* The elapsed time in TUs, after the initial transmission */
+       /* of an MSDU, after which further attempts to transmit */
+       /* the MSDU shall be terminated. Overrides the global */
+       /* dot11MaxTransmitMsduLifeTime setting [optional] */
+       /* Device will set the default value if this is 0. */
+       u32 expire_time;
+
+       /* WSM_HT_TX_... */
+       __le32 ht_tx_parameters;
+} __packed;
+
+/* = sizeof(generic hi hdr) + sizeof(wsm hdr) + sizeof(alignment) */
+#define WSM_TX_EXTRA_HEADROOM (28)
+
+/* 3.16 */
+#define WSM_RECEIVE_IND_ID 0x0804
+
+struct wsm_rx {
+       /* WSM_STATUS_... */
+       u32 status;
+
+       /* Specifies the channel of the received packet. */
+       u16 channel_number;
+
+       /* WSM_TRANSMIT_RATE_... */
+       u8 rx_rate;
+
+       /* This value is expressed in signed Q8.0 format for */
+       /* RSSI and unsigned Q7.1 format for RCPI. */
+       u8 rcpi_rssi;
+
+       /* WSM_RX_STATUS_... */
+       u32 flags;
+};
+
+/* = sizeof(generic hi hdr) + sizeof(wsm hdr) */
+#define WSM_RX_EXTRA_HEADROOM (16)
+
+typedef void (*wsm_rx_cb) (struct cw1200_common *priv, struct wsm_rx *arg,
+                          struct sk_buff **skb_p);
+
+/* 3.17 */
+struct wsm_event {
+       /* WSM_STATUS_... */
+       /* [out] */ u32 id;
+
+       /* Indication parameters. */
+       /* For error indication, this shall be a 32-bit WSM status. */
+       /* For RCPI or RSSI indication, this should be an 8-bit */
+       /* RCPI or RSSI value. */
+       /* [out] */ u32 data;
+};
+
+struct cw1200_wsm_event {
+       struct list_head link;
+       struct wsm_event evt;
+};
+
+/* 3.18 - 3.22 */
+/* Measurement. Skipped for now. Irrelevant. */
+
+typedef void (*wsm_event_cb) (struct cw1200_common *priv,
+                             struct wsm_event *arg);
+
+/* 3.23 */
+#define WSM_JOIN_REQ_ID 0x000B
+#define WSM_JOIN_RESP_ID 0x040B
+
+struct wsm_join {
+       /* WSM_JOIN_MODE_... */
+       u8 mode;
+
+       /* WSM_PHY_BAND_... */
+       u8 band;
+
+       /* Specifies the channel number to join. The channel */
+       /* number will be mapped to an actual frequency */
+       /* according to the band */
+       u16 channel_number;
+
+       /* Specifies the BSSID of the BSS or IBSS to be joined */
+       /* or the IBSS to be started. */
+       u8 bssid[6];
+
+       /* ATIM window of IBSS */
+       /* When ATIM window is zero the initiated IBSS does */
+       /* not support power saving. */
+       u16 atim_window;
+
+       /* WSM_JOIN_PREAMBLE_... */
+       u8 preamble_type;
+
+       /* Specifies if a probe request should be sent with the */
+       /* specified SSID when joining the network. */
+       u8 probe_for_join;
+
+       /* DTIM Period (In multiples of beacon interval) */
+       u8 dtim_period;
+
+       /* WSM_JOIN_FLAGS_... */
+       u8 flags;
+
+       /* Length of the SSID */
+       u32 ssid_len;
+
+       /* Specifies the SSID of the IBSS to join or start */
+       u8 ssid[32];
+
+       /* Specifies the time between TBTTs in TUs */
+       u32 beacon_interval;
+
+       /* A bit mask that defines the BSS basic rate set. */
+       u32 basic_rate_set;
+};
+
+struct wsm_join_cnf {
+       u32 status;
+
+       /* Minimum transmission power level in units of 0.1dBm */
+       u32 min_power_level;
+
+       /* Maximum transmission power level in units of 0.1dBm */
+       u32 max_power_level;
+};
+
+int wsm_join(struct cw1200_common *priv, struct wsm_join *arg);
+
+/* 3.24 */
+struct wsm_join_complete {
+       /* WSM_STATUS_... */
+       u32 status;
+};
+
+/* 3.25 */
+#define WSM_SET_PM_REQ_ID 0x0010
+#define WSM_SET_PM_RESP_ID 0x0410
+struct wsm_set_pm {
+       /* WSM_PSM_... */
+       u8 mode;
+
+       /* in unit of 500us; 0 to use default */
+       u8 fast_psm_idle_period;
+
+       /* in unit of 500us; 0 to use default */
+       u8 ap_psm_change_period;
+
+       /* in unit of 500us; 0 to disable auto-pspoll */
+       u8 min_auto_pspoll_period;
+};
+
+int wsm_set_pm(struct cw1200_common *priv, const struct wsm_set_pm *arg);
+
+/* 3.27 */
+struct wsm_set_pm_complete {
+       u8 psm;                 /* WSM_PSM_... */
+};
+
+/* 3.28 */
+#define WSM_SET_BSS_PARAMS_REQ_ID 0x0011
+#define WSM_SET_BSS_PARAMS_RESP_ID 0x0411
+struct wsm_set_bss_params {
+       /* This resets the beacon loss counters only */
+       u8 reset_beacon_loss;
+
+       /* The number of lost consecutive beacons after which */
+       /* the WLAN device should indicate the BSS-Lost event */
+       /* to the WLAN host driver. */
+       u8 beacon_lost_count;
+
+       /* The AID received during the association process. */
+       u16 aid;
+
+       /* The operational rate set mask */
+       u32 operational_rate_set;
+};
+
+int wsm_set_bss_params(struct cw1200_common *priv,
+                      const struct wsm_set_bss_params *arg);
+
+/* 3.30 */
+#define WSM_ADD_KEY_REQ_ID         0x000C
+#define WSM_ADD_KEY_RESP_ID        0x040C
+struct wsm_add_key {
+       u8 type;                /* WSM_KEY_TYPE_... */
+       u8 index;               /* Key entry index: 0 -- WSM_KEY_MAX_INDEX */
+       u16 reserved;
+       union {
+               struct {
+                       u8 peer[6];     /* MAC address of the peer station */
+                       u8 reserved;
+                       u8 keylen;              /* Key length in bytes */
+                       u8 keydata[16];         /* Key data */
+               } __packed wep_pairwise;
+               struct {
+                       u8 keyid;       /* Unique per key identifier (0..3) */
+                       u8 keylen;              /* Key length in bytes */
+                       u16 reserved;
+                       u8 keydata[16];         /* Key data */
+               } __packed wep_group;
+               struct {
+                       u8 peer[6];     /* MAC address of the peer station */
+                       u16 reserved;
+                       u8 keydata[16]; /* TKIP key data */
+                       u8 rx_mic_key[8];               /* Rx MIC key */
+                       u8 tx_mic_key[8];               /* Tx MIC key */
+               } __packed tkip_pairwise;
+               struct {
+                       u8 keydata[16]; /* TKIP key data */
+                       u8 rx_mic_key[8];               /* Rx MIC key */
+                       u8 keyid;               /* Key ID */
+                       u8 reserved[3];
+                       u8 rx_seqnum[8];        /* Receive Sequence Counter */
+               } __packed tkip_group;
+               struct {
+                       u8 peer[6];     /* MAC address of the peer station */
+                       u16 reserved;
+                       u8 keydata[16]; /* AES key data */
+               } __packed aes_pairwise;
+               struct {
+                       u8 keydata[16]; /* AES key data */
+                       u8 keyid;               /* Key ID */
+                       u8 reserved[3];
+                       u8 rx_seqnum[8];        /* Receive Sequence Counter */
+               } __packed aes_group;
+               struct {
+                       u8 peer[6];     /* MAC address of the peer station */
+                       u8 keyid;               /* Key ID */
+                       u8 reserved;
+                       u8 keydata[16]; /* WAPI key data */
+                       u8 mic_key[16]; /* MIC key data */
+               } __packed wapi_pairwise;
+               struct {
+                       u8 keydata[16]; /* WAPI key data */
+                       u8 mic_key[16]; /* MIC key data */
+                       u8 keyid;               /* Key ID */
+                       u8 reserved[3];
+               } __packed wapi_group;
+       } __packed;
+} __packed;
+
+int wsm_add_key(struct cw1200_common *priv, const struct wsm_add_key *arg);
+
+/* 3.32 */
+#define WSM_REMOVE_KEY_REQ_ID         0x000D
+#define WSM_REMOVE_KEY_RESP_ID        0x040D
+struct wsm_remove_key {
+       u8 index; /* Key entry index : 0-10 */
+};
+
+int wsm_remove_key(struct cw1200_common *priv,
+                  const struct wsm_remove_key *arg);
+
+/* 3.34 */
+struct wsm_set_tx_queue_params {
+       /* WSM_ACK_POLICY_... */
+       u8 ackPolicy;
+
+       /* Medium Time of TSPEC (in 32us units) allowed per */
+       /* One Second Averaging Period for this queue. */
+       u16 allowedMediumTime;
+
+       /* dot11MaxTransmitMsduLifetime to be used for the */
+       /* specified queue. */
+       u32 maxTransmitLifetime;
+};
+
+struct wsm_tx_queue_params {
+       /* NOTE: index is a linux queue id. */
+       struct wsm_set_tx_queue_params params[4];
+};
+
+
+#define WSM_TX_QUEUE_SET(queue_params, queue, ack_policy, allowed_time,\
+               max_life_time)  \
+do {                                                   \
+       struct wsm_set_tx_queue_params *p = &(queue_params)->params[queue]; \
+       p->ackPolicy = (ack_policy);                            \
+       p->allowedMediumTime = (allowed_time);                          \
+       p->maxTransmitLifetime = (max_life_time);                       \
+} while (0)
+
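+/* Illustrative usage with hypothetical values (the queue index is a
+ * linux queue id, see the NOTE above):
+ *   WSM_TX_QUEUE_SET(&tx_queue_params, 0, WSM_ACK_POLICY_NORMAL, 0, 0);
+ */
+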
+int wsm_set_tx_queue_params(struct cw1200_common *priv,
+                           const struct wsm_set_tx_queue_params *arg, u8 id);
+
+/* 3.36 */
+#define WSM_EDCA_PARAMS_REQ_ID 0x0013
+#define WSM_EDCA_PARAMS_RESP_ID 0x0413
+struct wsm_edca_queue_params {
+       /* CWmin (in slots) for the access class. */
+       u16 cwmin;
+
+       /* CWmax (in slots) for the access class. */
+       u16 cwmax;
+
+       /* AIFS (in slots) for the access class. */
+       u16 aifns;
+
+       /* TX OP Limit (in microseconds) for the access class. */
+       u16 txop_limit;
+
+       /* dot11MaxReceiveLifetime to be used for the specified */
+       /* access class. Overrides the global */
+       /* dot11MaxReceiveLifetime value */
+       u32 max_rx_lifetime;
+};
+
+struct wsm_edca_params {
+       /* NOTE: index is a linux queue id. */
+       struct wsm_edca_queue_params params[4];
+       bool uapsd_enable[4];
+};
+
+#define TXOP_UNIT 32
+#define WSM_EDCA_SET(__edca, __queue, __aifs, __cw_min, __cw_max, __txop, __lifetime,\
+                    __uapsd) \
+       do {                                                    \
+               struct wsm_edca_queue_params *p = &(__edca)->params[__queue]; \
+               p->cwmin = __cw_min;                                    \
+               p->cwmax = __cw_max;                                    \
+               p->aifns = __aifs;                                      \
+               p->txop_limit = ((__txop) * TXOP_UNIT);                 \
+               p->max_rx_lifetime = __lifetime;                        \
+               (__edca)->uapsd_enable[__queue] = (__uapsd);            \
+       } while (0)
+
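+/* Illustrative usage with hypothetical values (queue index is a linux
+ * queue id): WSM_EDCA_SET(&edca, 0, 2, 3, 7, 47, 0, true) would set
+ * AIFS=2 slots, CWmin=3, CWmax=7, TXOP=47*32us and enable U-APSD for
+ * queue 0.
+ */
+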
+int wsm_set_edca_params(struct cw1200_common *priv,
+                       const struct wsm_edca_params *arg);
+
+int wsm_set_uapsd_param(struct cw1200_common *priv,
+                       const struct wsm_edca_params *arg);
+
+/* 3.38 */
+/* Set-System info. Skipped for now. Irrelevant. */
+
+/* 3.40 */
+#define WSM_SWITCH_CHANNEL_REQ_ID 0x0016
+#define WSM_SWITCH_CHANNEL_RESP_ID 0x0416
+
+struct wsm_switch_channel {
+       /* 1 - means the STA shall not transmit any further */
+       /* frames until the channel switch has completed */
+       u8 mode;
+
+       /* Number of TBTTs until channel switch occurs. */
+       /* 0 - indicates switch shall occur at any time */
+       /* 1 - occurs immediately before the next TBTT */
+       u8 switch_count;
+
+       /* The new channel number to switch to. */
+       /* Note this is defined as per section 2.7. */
+       u16 channel_number;
+};
+
+int wsm_switch_channel(struct cw1200_common *priv,
+                      const struct wsm_switch_channel *arg);
+
+typedef void (*wsm_channel_switch_cb) (struct cw1200_common *priv);
+
+#define WSM_START_REQ_ID 0x0017
+#define WSM_START_RESP_ID 0x0417
+
+struct wsm_start {
+       /* WSM_START_MODE_... */
+       /* [in] */ u8 mode;
+
+       /* WSM_PHY_BAND_... */
+       /* [in] */ u8 band;
+
+       /* Channel number */
+       /* [in] */ u16 channel_number;
+
+       /* Client Traffic window in units of TU */
+       /* Valid only when mode == ..._P2P */
+       /* [in] */ u32 ct_window;
+
+       /* Interval between two consecutive */
+       /* beacon transmissions in TU. */
+       /* [in] */ u32 beacon_interval;
+
+       /* DTIM period in terms of beacon intervals */
+       /* [in] */ u8 dtim_period;
+
+       /* WSM_JOIN_PREAMBLE_... */
+       /* [in] */ u8 preamble;
+
+       /* The delay period (in microseconds) */
+       /* before sending a probe-request. */
+       /* [in] */ u8 probe_delay;
+
+       /* Length of the SSID */
+       /* [in] */ u8 ssid_len;
+
+       /* SSID of the BSS or P2P_GO to be started now. */
+       /* [in] */ u8 ssid[32];
+
+       /* The basic supported rates for the MiniAP. */
+       /* [in] */ u32 basic_rate_set;
+};
+
+int wsm_start(struct cw1200_common *priv, const struct wsm_start *arg);
+
+#define WSM_BEACON_TRANSMIT_REQ_ID 0x0018
+#define WSM_BEACON_TRANSMIT_RESP_ID 0x0418
+
+struct wsm_beacon_transmit {
+       /* 1: enable; 0: disable */
+       /* [in] */ u8 enable_beaconing;
+};
+
+int wsm_beacon_transmit(struct cw1200_common *priv,
+                       const struct wsm_beacon_transmit *arg);
+
+int wsm_start_find(struct cw1200_common *priv);
+
+int wsm_stop_find(struct cw1200_common *priv);
+
+typedef void (*wsm_find_complete_cb) (struct cw1200_common *priv, u32 status);
+
+struct wsm_suspend_resume {
+       /* See 3.52 */
+       /* Link ID */
+       /* [out] */ int link_id;
+       /* Stop sending further Tx requests down to device for this link */
+       /* [out] */ bool stop;
+       /* Transmit multicast Frames */
+       /* [out] */ bool multicast;
+       /* The AC on which Tx is to be suspended/resumed. */
+       /* This is applicable only for U-APSD */
+       /* WSM_QUEUE_... */
+       /* [out] */ int queue;
+};
+
+typedef void (*wsm_suspend_resume_cb) (struct cw1200_common *priv,
+                                      struct wsm_suspend_resume *arg);
+
+/* 3.54 Update-IE request. */
+struct wsm_update_ie {
+       /* WSM_UPDATE_IE_... */
+       /* [in] */ u16 what;
+       /* [in] */ u16 count;
+       /* [in] */ u8 *ies;
+       /* [in] */ size_t length;
+};
+
+int wsm_update_ie(struct cw1200_common *priv,
+                 const struct wsm_update_ie *arg);
+
+/* 3.56 */
+struct wsm_map_link {
+       /* MAC address of the remote device */
+       /* [in] */ u8 mac_addr[6];
+       /* [in] */ u8 link_id;
+};
+
+int wsm_map_link(struct cw1200_common *priv, const struct wsm_map_link *arg);
+
+/* ******************************************************************** */
+/* MIB shortcuts                                                       */
+
+static inline int wsm_set_output_power(struct cw1200_common *priv,
+                                      int power_level)
+{
+       __le32 val = __cpu_to_le32(power_level);
+       return wsm_write_mib(priv, WSM_MIB_ID_DOT11_CURRENT_TX_POWER_LEVEL,
+                            &val, sizeof(val));
+}
+
+static inline int wsm_set_beacon_wakeup_period(struct cw1200_common *priv,
+                                              unsigned dtim_interval,
+                                              unsigned listen_interval)
+{
+       struct {
+               u8 numBeaconPeriods;
+               u8 reserved;
+               __le16 listenInterval;
+       } val = {
+               dtim_interval, 0, __cpu_to_le16(listen_interval)
+       };
+
+       if (dtim_interval > 0xFF || listen_interval > 0xFFFF)
+               return -EINVAL;
+       else
+               return wsm_write_mib(priv, WSM_MIB_ID_BEACON_WAKEUP_PERIOD,
+                                    &val, sizeof(val));
+}
+
+struct wsm_rcpi_rssi_threshold {
+       u8 rssiRcpiMode;        /* WSM_RCPI_RSSI_... */
+       u8 lowerThreshold;
+       u8 upperThreshold;
+       u8 rollingAverageCount;
+};
+
+static inline int wsm_set_rcpi_rssi_threshold(struct cw1200_common *priv,
+                                       struct wsm_rcpi_rssi_threshold *arg)
+{
+       return wsm_write_mib(priv, WSM_MIB_ID_RCPI_RSSI_THRESHOLD, arg,
+                            sizeof(*arg));
+}
+
+struct wsm_mib_counters_table {
+       __le32 plcp_errors;
+       __le32 fcs_errors;
+       __le32 tx_packets;
+       __le32 rx_packets;
+       __le32 rx_packet_errors;
+       __le32 rx_decryption_failures;
+       __le32 rx_mic_failures;
+       __le32 rx_no_key_failures;
+       __le32 tx_multicast_frames;
+       __le32 tx_frames_success;
+       __le32 tx_frame_failures;
+       __le32 tx_frames_retried;
+       __le32 tx_frames_multi_retried;
+       __le32 rx_frame_duplicates;
+       __le32 rts_success;
+       __le32 rts_failures;
+       __le32 ack_failures;
+       __le32 rx_multicast_frames;
+       __le32 rx_frames_success;
+       __le32 rx_cmac_icv_errors;
+       __le32 rx_cmac_replays;
+       __le32 rx_mgmt_ccmp_replays;
+} __packed;
+
+static inline int wsm_get_counters_table(struct cw1200_common *priv,
+                                        struct wsm_mib_counters_table *arg)
+{
+       return wsm_read_mib(priv, WSM_MIB_ID_COUNTERS_TABLE,
+                           arg, sizeof(*arg));
+}
+
+static inline int wsm_get_station_id(struct cw1200_common *priv, u8 *mac)
+{
+       return wsm_read_mib(priv, WSM_MIB_ID_DOT11_STATION_ID, mac, ETH_ALEN);
+}
+
+struct wsm_rx_filter {
+       bool promiscuous;
+       bool bssid;
+       bool fcs;
+       bool probeResponder;
+};
+
+static inline int wsm_set_rx_filter(struct cw1200_common *priv,
+                                   const struct wsm_rx_filter *arg)
+{
+       __le32 val = 0;
+       if (arg->promiscuous)
+               val |= __cpu_to_le32(BIT(0));
+       if (arg->bssid)
+               val |= __cpu_to_le32(BIT(1));
+       if (arg->fcs)
+               val |= __cpu_to_le32(BIT(2));
+       if (arg->probeResponder)
+               val |= __cpu_to_le32(BIT(3));
+       return wsm_write_mib(priv, WSM_MIB_ID_RX_FILTER, &val, sizeof(val));
+}
+
+int wsm_set_probe_responder(struct cw1200_common *priv, bool enable);
+
+#define WSM_BEACON_FILTER_IE_HAS_CHANGED       BIT(0)
+#define WSM_BEACON_FILTER_IE_NO_LONGER_PRESENT BIT(1)
+#define WSM_BEACON_FILTER_IE_HAS_APPEARED      BIT(2)
+
+struct wsm_beacon_filter_table_entry {
+       u8      ie_id;
+       u8      flags;
+       u8      oui[3];
+       u8      match_data[3];
+} __packed;
+
+struct wsm_mib_beacon_filter_table {
+       __le32 num;
+       struct wsm_beacon_filter_table_entry entry[10];
+} __packed;
+
+static inline int wsm_set_beacon_filter_table(struct cw1200_common *priv,
+                                             struct wsm_mib_beacon_filter_table *ft)
+{
+       size_t size = __le32_to_cpu(ft->num) *
+                    sizeof(struct wsm_beacon_filter_table_entry) +
+                    sizeof(__le32);
+
+       return wsm_write_mib(priv, WSM_MIB_ID_BEACON_FILTER_TABLE, ft, size);
+}
+
+#define WSM_BEACON_FILTER_ENABLE       BIT(0) /* Enable/disable beacon filtering */
+#define WSM_BEACON_FILTER_AUTO_ERP     BIT(1) /* If 1 FW will handle ERP IE changes internally */
+
+struct wsm_beacon_filter_control {
+       int enabled;
+       int bcn_count;
+};
+
+static inline int wsm_beacon_filter_control(struct cw1200_common *priv,
+                                       struct wsm_beacon_filter_control *arg)
+{
+       struct {
+               __le32 enabled;
+               __le32 bcn_count;
+       } val;
+       val.enabled = __cpu_to_le32(arg->enabled);
+       val.bcn_count = __cpu_to_le32(arg->bcn_count);
+       return wsm_write_mib(priv, WSM_MIB_ID_BEACON_FILTER_ENABLE, &val,
+                            sizeof(val));
+}
+
+enum wsm_power_mode {
+       wsm_power_mode_active = 0,
+       wsm_power_mode_doze = 1,
+       wsm_power_mode_quiescent = 2,
+};
+
+struct wsm_operational_mode {
+       enum wsm_power_mode power_mode;
+       int disable_more_flag_usage;
+       int perform_ant_diversity;
+};
+
+static inline int wsm_set_operational_mode(struct cw1200_common *priv,
+                                       const struct wsm_operational_mode *arg)
+{
+       u8 val = arg->power_mode;
+       if (arg->disable_more_flag_usage)
+               val |= BIT(4);
+       if (arg->perform_ant_diversity)
+               val |= BIT(5);
+       return wsm_write_mib(priv, WSM_MIB_ID_OPERATIONAL_POWER_MODE, &val,
+                            sizeof(val));
+}
+
+struct wsm_template_frame {
+       u8 frame_type;
+       u8 rate;
+       struct sk_buff *skb;
+};
+
+static inline int wsm_set_template_frame(struct cw1200_common *priv,
+                                        struct wsm_template_frame *arg)
+{
+       int ret;
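+       /* Prepend the 4-byte template header: frame type, rate, and the
+        * 16-bit length of the template frame (little-endian).
+        */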
+       u8 *p = skb_push(arg->skb, 4);
+       p[0] = arg->frame_type;
+       p[1] = arg->rate;
+       ((__le16 *)p)[1] = __cpu_to_le16(arg->skb->len - 4);
+       ret = wsm_write_mib(priv, WSM_MIB_ID_TEMPLATE_FRAME, p, arg->skb->len);
+       skb_pull(arg->skb, 4);
+       return ret;
+}
+
+
+struct wsm_protected_mgmt_policy {
+       bool protectedMgmtEnable;
+       bool unprotectedMgmtFramesAllowed;
+       bool encryptionForAuthFrame;
+};
+
+static inline int wsm_set_protected_mgmt_policy(struct cw1200_common *priv,
+               struct wsm_protected_mgmt_policy *arg)
+{
+       __le32 val = 0;
+       int ret;
+       if (arg->protectedMgmtEnable)
+               val |= __cpu_to_le32(BIT(0));
+       if (arg->unprotectedMgmtFramesAllowed)
+               val |= __cpu_to_le32(BIT(1));
+       if (arg->encryptionForAuthFrame)
+               val |= __cpu_to_le32(BIT(2));
+       ret = wsm_write_mib(priv, WSM_MIB_ID_PROTECTED_MGMT_POLICY,
+                       &val, sizeof(val));
+       return ret;
+}
+
+struct wsm_mib_block_ack_policy {
+       u8 tx_tid;
+       u8 reserved1;
+       u8 rx_tid;
+       u8 reserved2;
+} __packed;
+
+static inline int wsm_set_block_ack_policy(struct cw1200_common *priv,
+                                          u8 tx_tid_policy,
+                                          u8 rx_tid_policy)
+{
+       struct wsm_mib_block_ack_policy val = {
+               .tx_tid = tx_tid_policy,
+               .rx_tid = rx_tid_policy,
+       };
+       return wsm_write_mib(priv, WSM_MIB_ID_BLOCK_ACK_POLICY, &val,
+                            sizeof(val));
+}
+
+struct wsm_mib_association_mode {
+       u8 flags;               /* WSM_ASSOCIATION_MODE_... */
+       u8 preamble;    /* WSM_JOIN_PREAMBLE_... */
+       u8 greenfield;  /* 1 for greenfield */
+       u8 mpdu_start_spacing;
+       __le32 basic_rate_set;
+} __packed;
+
+static inline int wsm_set_association_mode(struct cw1200_common *priv,
+                                          struct wsm_mib_association_mode *arg)
+{
+       return wsm_write_mib(priv, WSM_MIB_ID_SET_ASSOCIATION_MODE, arg,
+                            sizeof(*arg));
+}
+
+#define WSM_TX_RATE_POLICY_FLAG_TERMINATE_WHEN_FINISHED BIT(2)
+#define WSM_TX_RATE_POLICY_FLAG_COUNT_INITIAL_TRANSMIT BIT(3)
+struct wsm_tx_rate_retry_policy {
+       u8 index;
+       u8 short_retries;
+       u8 long_retries;
+       /* BIT(2) - Terminate retries when Tx rate retry policy
+        *          finishes.
+        * BIT(3) - Count initial frame transmission as part of
+        *          rate retry counting but not as a retry
+        *          attempt
+        */
+       u8 flags;
+       u8 rate_recoveries;
+       u8 reserved[3];
+       __le32 rate_count_indices[3];
+} __packed;
+
+struct wsm_set_tx_rate_retry_policy {
+       u8 num;
+       u8 reserved[3];
+       struct wsm_tx_rate_retry_policy tbl[8];
+} __packed;
+
+static inline int wsm_set_tx_rate_retry_policy(struct cw1200_common *priv,
+                               struct wsm_set_tx_rate_retry_policy *arg)
+{
+       size_t size = 4 + arg->num * sizeof(struct wsm_tx_rate_retry_policy);
+       return wsm_write_mib(priv, WSM_MIB_ID_SET_TX_RATE_RETRY_POLICY, arg,
+                            size);
+}
+
+/* 4.32 SetEtherTypeDataFrameFilter */
+struct wsm_ether_type_filter_hdr {
+       u8 num;         /* Up to WSM_MAX_FILTER_ELEMENTS */
+       u8 reserved[3];
+} __packed;
+
+struct wsm_ether_type_filter {
+       u8 action;      /* WSM_FILTER_ACTION_XXX */
+       u8 reserved;
+       __le16 type;    /* Type of ethernet frame */
+} __packed;
+
+static inline int wsm_set_ether_type_filter(struct cw1200_common *priv,
+                               struct wsm_ether_type_filter_hdr *arg)
+{
+       size_t size = sizeof(struct wsm_ether_type_filter_hdr) +
+               arg->num * sizeof(struct wsm_ether_type_filter);
+       return wsm_write_mib(priv, WSM_MIB_ID_SET_ETHERTYPE_DATAFRAME_FILTER,
+               arg, size);
+}
+
+/* 4.33 SetUDPPortDataFrameFilter */
+struct wsm_udp_port_filter_hdr {
+       u8 num;         /* Up to WSM_MAX_FILTER_ELEMENTS */
+       u8 reserved[3];
+} __packed;
+
+struct wsm_udp_port_filter {
+       u8 action;      /* WSM_FILTER_ACTION_XXX */
+       u8 type;                /* WSM_FILTER_PORT_TYPE_XXX */
+       __le16 port;            /* Port number */
+} __packed;
+
+static inline int wsm_set_udp_port_filter(struct cw1200_common *priv,
+                               struct wsm_udp_port_filter_hdr *arg)
+{
+       size_t size = sizeof(struct wsm_udp_port_filter_hdr) +
+               arg->num * sizeof(struct wsm_udp_port_filter);
+       return wsm_write_mib(priv, WSM_MIB_ID_SET_UDPPORT_DATAFRAME_FILTER,
+               arg, size);
+}
+
+/* Undocumented MIBs: */
+/* 4.35 P2PDeviceInfo */
+#define D11_MAX_SSID_LEN               (32)
+
+struct wsm_p2p_device_type {
+       __le16 category_id;
+       u8 oui[4];
+       __le16 subcategory_id;
+} __packed;
+
+struct wsm_p2p_device_info {
+       struct wsm_p2p_device_type primaryDevice;
+       u8 reserved1[3];
+       u8 devname_size;
+       u8 local_devname[D11_MAX_SSID_LEN];
+       u8 reserved2[3];
+       u8 num_secdev_supported;
+       struct wsm_p2p_device_type secdevs[0];
+} __packed;
+
+/* 4.36 SetWCDMABand - WO */
+struct wsm_cdma_band {
+       u8 wcdma_band;
+       u8 reserved[3];
+} __packed;
+
+/* 4.37 GroupTxSequenceCounter - RO */
+struct wsm_group_tx_seq {
+       __le32 bits_47_16;
+       __le16 bits_15_00;
+       __le16 reserved;
+} __packed;
+
+/* 4.39 SetHtProtection - WO */
+#define WSM_DUAL_CTS_PROT_ENB          (1 << 0)
+#define WSM_NON_GREENFIELD_STA_PRESENT  (1 << 1)
+#define WSM_HT_PROT_MODE__NO_PROT      (0 << 2)
+#define WSM_HT_PROT_MODE__NON_MEMBER   (1 << 2)
+#define WSM_HT_PROT_MODE__20_MHZ       (2 << 2)
+#define WSM_HT_PROT_MODE__NON_HT_MIXED (3 << 2)
+#define WSM_LSIG_TXOP_PROT_FULL                (1 << 4)
+#define WSM_LARGE_L_LENGTH_PROT                (1 << 5)
+
+struct wsm_ht_protection {
+       __le32 flags;
+} __packed;
+
+/* 4.40 GPIO Command - R/W */
+#define WSM_GPIO_COMMAND_SETUP 0
+#define WSM_GPIO_COMMAND_READ  1
+#define WSM_GPIO_COMMAND_WRITE 2
+#define WSM_GPIO_COMMAND_RESET 3
+#define WSM_GPIO_ALL_PINS      0xFF
+
+struct wsm_gpio_command {
+       u8 command;
+       u8 pin;
+       __le16 config;
+} __packed;
+
+/* 4.41 TSFCounter - RO */
+struct wsm_tsf_counter {
+       __le64 tsf_counter;
+} __packed;
+
+/* 4.43 Keep alive period */
+struct wsm_keep_alive_period {
+       __le16 period;
+       u8 reserved[2];
+} __packed;
+
+static inline int wsm_keep_alive_period(struct cw1200_common *priv,
+                                       int period)
+{
+       struct wsm_keep_alive_period arg = {
+               .period = __cpu_to_le16(period),
+       };
+       return wsm_write_mib(priv, WSM_MIB_ID_KEEP_ALIVE_PERIOD,
+                       &arg, sizeof(arg));
+}
+
+/* BSSID filtering */
+struct wsm_set_bssid_filtering {
+       u8 filter;
+       u8 reserved[3];
+} __packed;
+
+static inline int wsm_set_bssid_filtering(struct cw1200_common *priv,
+                                         bool enabled)
+{
+       struct wsm_set_bssid_filtering arg = {
+               .filter = !enabled,
+       };
+       return wsm_write_mib(priv, WSM_MIB_ID_DISABLE_BSSID_FILTER,
+                       &arg, sizeof(arg));
+}
+
+/* Multicast filtering - 4.5 */
+struct wsm_mib_multicast_filter {
+       __le32 enable;
+       __le32 num_addrs;
+       u8 macaddrs[WSM_MAX_GRP_ADDRTABLE_ENTRIES][ETH_ALEN];
+} __packed;
+
+static inline int wsm_set_multicast_filter(struct cw1200_common *priv,
+                                          struct wsm_mib_multicast_filter *fp)
+{
+       return wsm_write_mib(priv, WSM_MIB_ID_DOT11_GROUP_ADDRESSES_TABLE,
+                            fp, sizeof(*fp));
+}
+
+/* ARP IPv4 filtering - 4.10 */
+struct wsm_mib_arp_ipv4_filter {
+       __le32 enable;
+       __be32 ipv4addrs[WSM_MAX_ARP_IP_ADDRTABLE_ENTRIES];
+} __packed;
+
+static inline int wsm_set_arp_ipv4_filter(struct cw1200_common *priv,
+                                         struct wsm_mib_arp_ipv4_filter *fp)
+{
+       return wsm_write_mib(priv, WSM_MIB_ID_ARP_IP_ADDRESSES_TABLE,
+                           fp, sizeof(*fp));
+}
+
+/* P2P Power Save Mode Info - 4.31 */
+struct wsm_p2p_ps_modeinfo {
+       u8      opp_ps_ct_window;
+       u8      count;
+       u8      reserved;
+       u8      dtim_count;
+       __le32  duration;
+       __le32  interval;
+       __le32  start_time;
+} __packed;
+
+static inline int wsm_set_p2p_ps_modeinfo(struct cw1200_common *priv,
+                                         struct wsm_p2p_ps_modeinfo *mi)
+{
+       return wsm_write_mib(priv, WSM_MIB_ID_P2P_PS_MODE_INFO,
+                            mi, sizeof(*mi));
+}
+
+static inline int wsm_get_p2p_ps_modeinfo(struct cw1200_common *priv,
+                                         struct wsm_p2p_ps_modeinfo *mi)
+{
+       return wsm_read_mib(priv, WSM_MIB_ID_P2P_PS_MODE_INFO,
+                           mi, sizeof(*mi));
+}
+
+/* UseMultiTxConfMessage */
+
+static inline int wsm_use_multi_tx_conf(struct cw1200_common *priv,
+                                       bool enabled)
+{
+       __le32 arg = enabled ? __cpu_to_le32(1) : 0;
+
+       return wsm_write_mib(priv, WSM_MIB_USE_MULTI_TX_CONF,
+                       &arg, sizeof(arg));
+}
+
+
+/* 4.26 SetUapsdInformation */
+struct wsm_uapsd_info {
+       __le16 uapsd_flags;
+       __le16 min_auto_trigger_interval;
+       __le16 max_auto_trigger_interval;
+       __le16 auto_trigger_step;
+};
+
+static inline int wsm_set_uapsd_info(struct cw1200_common *priv,
+                                    struct wsm_uapsd_info *arg)
+{
+       return wsm_write_mib(priv, WSM_MIB_ID_SET_UAPSD_INFORMATION,
+                               arg, sizeof(*arg));
+}
+
+/* 4.22 OverrideInternalTxRate */
+struct wsm_override_internal_txrate {
+       u8 internalTxRate;
+       u8 nonErpInternalTxRate;
+       u8 reserved[2];
+} __packed;
+
+static inline int wsm_set_override_internal_txrate(struct cw1200_common *priv,
+                                    struct wsm_override_internal_txrate *arg)
+{
+       return wsm_write_mib(priv, WSM_MIB_ID_OVERRIDE_INTERNAL_TX_RATE,
+                               arg, sizeof(*arg));
+}
+
+/* ******************************************************************** */
+/* WSM TX port control                                                 */
+
+void wsm_lock_tx(struct cw1200_common *priv);
+void wsm_lock_tx_async(struct cw1200_common *priv);
+bool wsm_flush_tx(struct cw1200_common *priv);
+void wsm_unlock_tx(struct cw1200_common *priv);
+
+/* ******************************************************************** */
+/* WSM / BH API                                                                */
+
+int wsm_handle_exception(struct cw1200_common *priv, u8 *data, size_t len);
+int wsm_handle_rx(struct cw1200_common *priv, u16 id, struct wsm_hdr *wsm,
+                 struct sk_buff **skb_p);
+
+/* ******************************************************************** */
+/* wsm_buf API                                                         */
+
+struct wsm_buf {
+       u8 *begin;
+       u8 *data;
+       u8 *end;
+};
+
+void wsm_buf_init(struct wsm_buf *buf);
+void wsm_buf_deinit(struct wsm_buf *buf);
+
+/* ******************************************************************** */
+/* wsm_cmd API                                                         */
+
+struct wsm_cmd {
+       spinlock_t lock; /* Protect structure from multiple access */
+       int done;
+       u8 *ptr;
+       size_t len;
+       void *arg;
+       int ret;
+       u16 cmd;
+};
+
+/* ******************************************************************** */
+/* WSM TX buffer access                                                        */
+
+int wsm_get_tx(struct cw1200_common *priv, u8 **data,
+              size_t *tx_len, int *burst);
+void wsm_txed(struct cw1200_common *priv, u8 *data);
+
+/* ******************************************************************** */
+/* Queue mapping: WSM <---> linux                                      */
+/* Linux: VO VI BE BK                                                  */
+/* WSM:   BE BK VI VO                                                  */
+
+static inline u8 wsm_queue_id_to_linux(u8 queue_id)
+{
+       static const u8 queue_mapping[] = {
+               2, 3, 1, 0
+       };
+       return queue_mapping[queue_id];
+}
+
+static inline u8 wsm_queue_id_to_wsm(u8 queue_id)
+{
+       static const u8 queue_mapping[] = {
+               3, 2, 0, 1
+       };
+       return queue_mapping[queue_id];
+}
+
+#endif /* CW1200_HWIO_H_INCLUDED */
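
The two lookup tables directly above encode the permutation between mac80211's
access-category order (VO VI BE BK) and the WSM queue order (BE BK VI VO). The
following standalone C sketch is not part of the patch and is included only
for illustration; it checks that the two tables copied from the header are
inverses of each other, i.e. a queue id survives a round trip in either
direction.

#include <assert.h>
#include <stdio.h>

/* Tables copied from wsm_queue_id_to_linux()/wsm_queue_id_to_wsm() above. */
static const unsigned char wsm_to_linux[4] = { 2, 3, 1, 0 };
static const unsigned char linux_to_wsm[4] = { 3, 2, 0, 1 };

int main(void)
{
	int q;

	for (q = 0; q < 4; q++) {
		/* Mapping one way and then back must return the original id. */
		assert(wsm_to_linux[linux_to_wsm[q]] == q);
		assert(linux_to_wsm[wsm_to_linux[q]] == q);
		printf("linux AC %d <-> wsm queue %d\n", q, linux_to_wsm[q]);
	}
	return 0;
}
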
index 15920aaa5dd6c766184335360937139295769dbe..f8ab193009cd9f3044a3d8846d4da4e7cbaec3e5 100644 (file)
@@ -6242,8 +6242,6 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
        if ((val & 0x0000ff00) != 0)
                pci_write_config_dword(pci_dev, 0x40, val & 0xffff00ff);
 
-       pci_set_power_state(pci_dev, PCI_D0);
-
        if (!ipw2100_hw_is_adapter_in_system(dev)) {
                printk(KERN_WARNING DRV_NAME
                       "Device not found via register read.\n");
index d96257b79a84a90c4f0c14ff8273f4e53dc86057..312fa0e42e6f5e0f29bf66e810a9bc4c41ca271a 100644 (file)
@@ -3548,6 +3548,7 @@ static int ipw_load(struct ipw_priv *priv)
                ipw_rx_queue_reset(priv, priv->rxq);
        if (!priv->rxq) {
                IPW_ERROR("Unable to initialize Rx queue\n");
+               rc = -ENOMEM;
                goto error;
        }
 
index 95a1ca1e895c6de72a7a792b66e32bc137876326..9ffe65931b293e4d6e4dd1f685dd75376e8dce96 100644 (file)
@@ -1195,7 +1195,7 @@ static int libipw_parse_info_param(struct libipw_info_element
 #ifdef CONFIG_LIBIPW_DEBUG
                                p += snprintf(p, sizeof(rates_str) -
                                              (p - rates_str), "%02X ",
-                                             network->rates[i]);
+                                             network->rates_ex[i]);
 #endif
                                if (libipw_is_ofdm_rate
                                    (info_element->data[i])) {
index b37a582ccbe7872eca2e5f5f8da14abe32f335e1..866ce6c684055e3e6a06f79681c75f73f9ccae27 100644 (file)
@@ -3727,7 +3727,8 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         * 5. Setup HW Constants
         * ********************/
        /* Device-specific setup */
-       if (il3945_hw_set_hw_params(il)) {
+       err = il3945_hw_set_hw_params(il);
+       if (err) {
                IL_ERR("failed to set hw settings\n");
                goto out_eeprom_free;
        }
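
This hunk, like the ipw2200 change above that sets rc = -ENOMEM, follows the
rule that an error path jumping to a shared label must first store a real
error code in the variable the label returns; otherwise the probe can report
success after a failure. Below is a small standalone sketch of that pattern,
with a made-up hw_set_params()/probe() pair used purely for illustration (not
part of the patch):

#include <errno.h>
#include <stdio.h>

static int hw_set_params(void)
{
	return -EINVAL;		/* pretend the hardware setup step failed */
}

static int probe(void)
{
	int err = 0;		/* earlier steps succeeded */

	err = hw_set_params();
	if (err) {
		fprintf(stderr, "failed to set hw settings\n");
		goto out;	/* err now carries the real failure code */
	}
	return 0;

out:
	return err;
}

int main(void)
{
	printf("probe() -> %d\n", probe());	/* prints -22 (-EINVAL) */
	return 0;
}
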
index dc1e6da9976af0977ae197475694a8f544ae1643..c092033945cc460022e1c8ea28e2622a9db0a79f 100644 (file)
@@ -331,6 +331,19 @@ il3945_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
                return;
        }
 
+       /*
+        * Firmware will not transmit frames on a passive channel until it
+        * has received at least one valid frame on that channel. When this
+        * error happens we have to wait for the firmware to unblock itself,
+        * i.e. until we note a received beacon or other frame. We unblock
+        * the queues in il3945_pass_packet_to_mac80211 or in
+        * il_mac_bss_info_changed.
+        */
+       if (unlikely((status & TX_STATUS_MSK) == TX_STATUS_FAIL_PASSIVE_NO_RX) &&
+           il->iw_mode == NL80211_IFTYPE_STATION) {
+               il_stop_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
+               D_INFO("Stopped queues - RX waiting on passive channel\n");
+       }
+
        txq->time_stamp = jiffies;
        info = IEEE80211_SKB_CB(txq->skbs[txq->q.read_ptr]);
        ieee80211_tx_info_clear_status(info);
@@ -488,6 +501,11 @@ il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb,
                return;
        }
 
+       if (unlikely(test_bit(IL_STOP_REASON_PASSIVE, &il->stop_reason))) {
+               il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
+               D_INFO("Woke queues - frame received on passive channel\n");
+       }
+
        skb = dev_alloc_skb(128);
        if (!skb) {
                IL_ERR("dev_alloc_skb failed\n");
index 9a95045c97b6b4fe6041f457a1999ecfb7926d12..d287fd2dce4250f636040a2e933e9c6ca5c9b394 100644 (file)
@@ -588,6 +588,11 @@ il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
                return;
        }
 
+       if (unlikely(test_bit(IL_STOP_REASON_PASSIVE, &il->stop_reason))) {
+               il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
+               D_INFO("Woke queues - frame received on passive channel\n");
+       }
+
        /* In case of HW accelerated crypto and bad decryption, drop */
        if (!il->cfg->mod_params->sw_crypto &&
            il_set_decrypted_flag(il, hdr, ampdu_status, stats))
@@ -2806,6 +2811,19 @@ il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
                return;
        }
 
+       /*
+        * Firmware will not transmit frames on a passive channel until it
+        * has received at least one valid frame on that channel. When this
+        * error happens we have to wait for the firmware to unblock itself,
+        * i.e. until we note a received beacon or other frame. We unblock
+        * the queues in il4965_pass_packet_to_mac80211 or in
+        * il_mac_bss_info_changed.
+        */
+       if (unlikely((status & TX_STATUS_MSK) == TX_STATUS_FAIL_PASSIVE_NO_RX) &&
+           il->iw_mode == NL80211_IFTYPE_STATION) {
+               il_stop_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
+               D_INFO("Stopped queues - RX waiting on passive channel\n");
+       }
+
        spin_lock_irqsave(&il->sta_lock, flags);
        if (txq->sched_retry) {
                const u32 scd_ssn = il4965_get_scd_ssn(tx_resp);
@@ -5741,7 +5759,8 @@ il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
        hw->flags =
            IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION |
            IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | IEEE80211_HW_SPECTRUM_MGMT |
-           IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
+           IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_SUPPORTS_PS |
+           IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
        if (il->cfg->sku & IL_SKU_N)
                hw->flags |=
                    IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
index 3b6c994008920e76ee455ef3512a8e380d20b4b6..048421511988afb12892bc7f9951d07f90d5d85c 100644 (file)
@@ -1347,14 +1347,6 @@ struct il_rx_mpdu_res_start {
 #define TX_CMD_SEC_SHIFT       6
 #define TX_CMD_SEC_KEY128      0x08
 
-/*
- * security overhead sizes
- */
-#define WEP_IV_LEN 4
-#define WEP_ICV_LEN 4
-#define CCMP_MIC_LEN 8
-#define TKIP_ICV_LEN 4
-
 /*
  * C_TX = 0x1c (command)
  */
index e9a3cbc409ae1d239fb7ff9c4a43436e9994f26d..3195aad440ddf6317b1b9b34d2e2f57e773755c3 100644 (file)
@@ -5306,6 +5306,17 @@ il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
        if (changes & BSS_CHANGED_BSSID) {
                D_MAC80211("BSSID %pM\n", bss_conf->bssid);
 
+               /*
+                * On a passive channel we wait with blocked queues to see if
+                * there is traffic on that channel. If no frame is received
+                * (which is very unlikely, since the scan detected an AP on
+                * that channel, but it is theoretically possible), the
+                * mac80211 association procedure will time out and mac80211
+                * will call us with a NULL bssid. We have to unblock the
+                * queues in that case.
+                */
+               if (is_zero_ether_addr(bss_conf->bssid))
+                       il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
+
                /*
                 * If there is currently a HW scan going on in the background,
                 * then we need to cancel it, otherwise sometimes we are not
index 4caaf52986a44355035d6e103b1dc6c437100a75..83f8ed8a5528cbd209c97560e9ab9c59d2294a90 100644 (file)
@@ -1299,6 +1299,8 @@ struct il_priv {
        /* queue refcounts */
 #define IL_MAX_HW_QUEUES       32
        unsigned long queue_stopped[BITS_TO_LONGS(IL_MAX_HW_QUEUES)];
+#define IL_STOP_REASON_PASSIVE 0
+       unsigned long stop_reason;
        /* for each AC */
        atomic_t queue_stop_count[4];
 
@@ -2256,6 +2258,19 @@ il_set_swq_id(struct il_tx_queue *txq, u8 ac, u8 hwq)
        txq->swq_id = (hwq << 2) | ac;
 }
 
+static inline void
+_il_wake_queue(struct il_priv *il, u8 ac)
+{
+       if (atomic_dec_return(&il->queue_stop_count[ac]) <= 0)
+               ieee80211_wake_queue(il->hw, ac);
+}
+
+static inline void
+_il_stop_queue(struct il_priv *il, u8 ac)
+{
+       if (atomic_inc_return(&il->queue_stop_count[ac]) > 0)
+               ieee80211_stop_queue(il->hw, ac);
+}
 static inline void
 il_wake_queue(struct il_priv *il, struct il_tx_queue *txq)
 {
@@ -2264,8 +2279,7 @@ il_wake_queue(struct il_priv *il, struct il_tx_queue *txq)
        u8 hwq = (queue >> 2) & 0x1f;
 
        if (test_and_clear_bit(hwq, il->queue_stopped))
-               if (atomic_dec_return(&il->queue_stop_count[ac]) <= 0)
-                       ieee80211_wake_queue(il->hw, ac);
+               _il_wake_queue(il, ac);
 }
 
 static inline void
@@ -2276,8 +2290,27 @@ il_stop_queue(struct il_priv *il, struct il_tx_queue *txq)
        u8 hwq = (queue >> 2) & 0x1f;
 
        if (!test_and_set_bit(hwq, il->queue_stopped))
-               if (atomic_inc_return(&il->queue_stop_count[ac]) > 0)
-                       ieee80211_stop_queue(il->hw, ac);
+               _il_stop_queue(il, ac);
+}
+
+static inline void
+il_wake_queues_by_reason(struct il_priv *il, int reason)
+{
+       u8 ac;
+
+       if (test_and_clear_bit(reason, &il->stop_reason))
+               for (ac = 0; ac < 4; ac++)
+                       _il_wake_queue(il, ac);
+}
+
+static inline void
+il_stop_queues_by_reason(struct il_priv *il, int reason)
+{
+       u8 ac;
+
+       if (!test_and_set_bit(reason, &il->stop_reason))
+               for (ac = 0; ac < 4; ac++)
+                       _il_stop_queue(il, ac);
 }
 
 #ifdef ieee80211_stop_queue
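
The il_stop_queues_by_reason()/il_wake_queues_by_reason() helpers added above
layer a reason bitmask (stop_reason, used here for IL_STOP_REASON_PASSIVE) on
top of the existing per-AC stop refcounts, so a given reason stops and wakes
all four queues at most once even if the triggering condition (e.g. repeated
TX_STATUS_FAIL_PASSIVE_NO_RX statuses) is reported many times. The standalone
sketch below models only that gating logic, with plain counters standing in
for the kernel's atomics and mac80211 queue calls; it is illustrative and not
part of the patch.

#include <stdio.h>

#define NUM_ACS 4

struct model {
	unsigned long stop_reason;	/* bitmask of pending stop reasons */
	int queue_stop_count[NUM_ACS];	/* per-AC stop refcount */
};

static void stop_queues_by_reason(struct model *m, int reason)
{
	int ac;

	if (m->stop_reason & (1UL << reason))
		return;			/* this reason is already pending */
	m->stop_reason |= 1UL << reason;
	for (ac = 0; ac < NUM_ACS; ac++)
		if (++m->queue_stop_count[ac] > 0)
			printf("stop AC %d\n", ac);
}

static void wake_queues_by_reason(struct model *m, int reason)
{
	int ac;

	if (!(m->stop_reason & (1UL << reason)))
		return;			/* nothing pending for this reason */
	m->stop_reason &= ~(1UL << reason);
	for (ac = 0; ac < NUM_ACS; ac++)
		if (--m->queue_stop_count[ac] <= 0)
			printf("wake AC %d\n", ac);
}

int main(void)
{
	struct model m = { 0 };

	stop_queues_by_reason(&m, 0);	/* e.g. IL_STOP_REASON_PASSIVE */
	stop_queues_by_reason(&m, 0);	/* duplicate report has no effect */
	wake_queues_by_reason(&m, 0);	/* all four ACs wake exactly once */
	return 0;
}
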
index 3b5613ea458b2093244dc22ce298390f61ed515a..f55a758b87f6c9a9953caff23670722a6ca3280e 100644 (file)
@@ -7,14 +7,16 @@ iwlwifi-objs          += iwl-notif-wait.o
 iwlwifi-objs           += iwl-eeprom-read.o iwl-eeprom-parse.o
 iwlwifi-objs           += iwl-phy-db.o iwl-nvm-parse.o
 iwlwifi-objs           += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
-iwlwifi-objs           += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o iwl-7000.o
+iwlwifi-$(CONFIG_IWLDVM) += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o
+iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o
+
+iwlwifi-objs += $(iwlwifi-m)
 
 iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
 iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-test.o
 
 ccflags-y += -D__CHECK_ENDIAN__ -I$(src)
 
-
 obj-$(CONFIG_IWLDVM)   += dvm/
 obj-$(CONFIG_IWLMVM)   += mvm/
 
index 48545ab003110559f4c5d097024c1066afbcd530..de2c9514bef6bbdb255a7de4ec51f2ad19729b5f 100644 (file)
 #define IWL_INVALID_STATION    255
 
 /* device operations */
-extern struct iwl_lib_ops iwl1000_lib;
-extern struct iwl_lib_ops iwl2000_lib;
-extern struct iwl_lib_ops iwl2030_lib;
-extern struct iwl_lib_ops iwl5000_lib;
-extern struct iwl_lib_ops iwl5150_lib;
-extern struct iwl_lib_ops iwl6000_lib;
-extern struct iwl_lib_ops iwl6030_lib;
+extern const struct iwl_dvm_cfg iwl_dvm_1000_cfg;
+extern const struct iwl_dvm_cfg iwl_dvm_2000_cfg;
+extern const struct iwl_dvm_cfg iwl_dvm_105_cfg;
+extern const struct iwl_dvm_cfg iwl_dvm_2030_cfg;
+extern const struct iwl_dvm_cfg iwl_dvm_5000_cfg;
+extern const struct iwl_dvm_cfg iwl_dvm_5150_cfg;
+extern const struct iwl_dvm_cfg iwl_dvm_6000_cfg;
+extern const struct iwl_dvm_cfg iwl_dvm_6005_cfg;
+extern const struct iwl_dvm_cfg iwl_dvm_6050_cfg;
+extern const struct iwl_dvm_cfg iwl_dvm_6030_cfg;
 
 
 #define TIME_UNIT              1024
@@ -291,8 +294,8 @@ void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena);
 
 static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv)
 {
-       return priv->cfg->bt_params &&
-              priv->cfg->bt_params->advanced_bt_coexist;
+       return priv->lib->bt_params &&
+              priv->lib->bt_params->advanced_bt_coexist;
 }
 
 #ifdef CONFIG_IWLWIFI_DEBUG
index d6c4cf2ad7c530c32f95cd96491a96ea23af15ec..1b0f0d502568700d096215b92fc1a1dce1afeb19 100644 (file)
@@ -521,7 +521,7 @@ static int iwl_enhance_sensitivity_write(struct iwl_priv *priv)
 
        iwl_prepare_legacy_sensitivity_tbl(priv, data, &cmd.enhance_table[0]);
 
-       if (priv->cfg->base_params->hd_v2) {
+       if (priv->lib->hd_v2) {
                cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX] =
                        HD_INA_NON_SQUARE_DET_OFDM_DATA_V2;
                cmd.enhance_table[HD_INA_NON_SQUARE_DET_CCK_INDEX] =
@@ -895,7 +895,7 @@ static void iwlagn_gain_computation(struct iwl_priv *priv,
                        continue;
                }
 
-               delta_g = (priv->cfg->base_params->chain_noise_scale *
+               delta_g = (priv->lib->chain_noise_scale *
                        ((s32)average_noise[default_chain] -
                        (s32)average_noise[i])) / 1500;
 
@@ -1051,8 +1051,8 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
                return;
 
        /* Analyze signal for disconnected antenna */
-       if (priv->cfg->bt_params &&
-           priv->cfg->bt_params->advanced_bt_coexist) {
+       if (priv->lib->bt_params &&
+           priv->lib->bt_params->advanced_bt_coexist) {
                /* Disable disconnected antenna algorithm for advanced
                   bt coex, assuming valid antennas are connected */
                data->active_chains = priv->nvm_data->valid_rx_ant;
index 95ca026ecc9d6be7383e2986385e84d374cb6964..ebdac909f0cd750bffbe2b46bd49ea4975079340 100644 (file)
@@ -838,10 +838,6 @@ struct iwl_qosparam_cmd {
 #define STA_MODIFY_DELBA_TID_MSK       0x10
 #define STA_MODIFY_SLEEP_TX_COUNT_MSK  0x20
 
-/* Receiver address (actually, Rx station's index into station table),
- * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
-#define BUILD_RAxTID(sta_id, tid)      (((sta_id) << 4) + (tid))
-
 /* agn */
 struct iwl_keyinfo {
        __le16 key_flags;
@@ -1224,14 +1220,6 @@ struct iwl_rx_mpdu_res_start {
 #define TX_CMD_SEC_SHIFT       6
 #define TX_CMD_SEC_KEY128      0x08
 
-/*
- * security overhead sizes
- */
-#define WEP_IV_LEN 4
-#define WEP_ICV_LEN 4
-#define CCMP_MIC_LEN 8
-#define TKIP_ICV_LEN 4
-
 /*
  * REPLY_TX = 0x1c (command)
  */
index 71ea77576d222cc0154d062d42d53191a8c4fc67..5cd87f94926640086e5ffbebf0f05226b93062ce 100644 (file)
@@ -568,16 +568,61 @@ struct iwl_hw_params {
        const struct iwl_sensitivity_ranges *sens;
 };
 
-struct iwl_lib_ops {
-       /* set hw dependent parameters */
+/**
+ * struct iwl_dvm_bt_params - DVM specific BT (coex) parameters
+ * @advanced_bt_coexist: support advanced bt coexist
+ * @bt_init_traffic_load: specify initial bt traffic load
+ * @bt_prio_boost: default bt priority boost value
+ * @agg_time_limit: maximum number of uSec in aggregation
+ * @bt_sco_disable: uCode should not respond to BT in SCO/ESCO mode
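+ * @bt_session_2: device uses version 2 of the BT coexistence command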
+ */
+struct iwl_dvm_bt_params {
+       bool advanced_bt_coexist;
+       u8 bt_init_traffic_load;
+       u32 bt_prio_boost;
+       u16 agg_time_limit;
+       bool bt_sco_disable;
+       bool bt_session_2;
+};
+
+/**
+ * struct iwl_dvm_cfg - DVM firmware specific device configuration
+ * @set_hw_params: set hardware parameters
+ * @set_channel_switch: send channel switch command
+ * @nic_config: apply device specific configuration
+ * @temperature: read temperature
+ * @adv_thermal_throttle: support advanced thermal throttling
+ * @support_ct_kill_exit: support ct kill exit condition
+ * @plcp_delta_threshold: plcp error rate threshold used to trigger
+ *     radio tuning when the received plcp error rate is high
+ * @chain_noise_scale: default chain noise scale used for gain computation
+ * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up
+ * @no_idle_support: do not support idle mode
+ * @bt_params: pointer to BT parameters
+ * @need_temp_offset_calib: need to perform temperature offset calibration
+ * @no_xtal_calib: some devices do not need crystal calibration data,
+ *     don't send it to those
+ * @temp_offset_v2: support v2 of temperature offset calibration
+ * @adv_pm: advanced power management
+ */
+struct iwl_dvm_cfg {
        void (*set_hw_params)(struct iwl_priv *priv);
        int (*set_channel_switch)(struct iwl_priv *priv,
                                  struct ieee80211_channel_switch *ch_switch);
-       /* device specific configuration */
        void (*nic_config)(struct iwl_priv *priv);
-
-       /* temperature */
        void (*temperature)(struct iwl_priv *priv);
+
+       const struct iwl_dvm_bt_params *bt_params;
+       s32 chain_noise_scale;
+       u8 plcp_delta_threshold;
+       bool adv_thermal_throttle;
+       bool support_ct_kill_exit;
+       bool hd_v2;
+       bool no_idle_support;
+       bool need_temp_offset_calib;
+       bool no_xtal_calib;
+       bool temp_offset_v2;
+       bool adv_pm;
 };
 
 struct iwl_wipan_noa_data {
@@ -610,7 +655,7 @@ struct iwl_priv {
        struct device *dev;             /* for debug prints only */
        const struct iwl_cfg *cfg;
        const struct iwl_fw *fw;
-       const struct iwl_lib_ops *lib;
+       const struct iwl_dvm_cfg *lib;
        unsigned long status;
 
        spinlock_t sta_lock;
@@ -870,6 +915,9 @@ struct iwl_priv {
        __le64 replay_ctr;
        __le16 last_seq_ctl;
        bool have_rekey_data;
+#ifdef CONFIG_PM_SLEEP
+       struct wiphy_wowlan_support wowlan_support;
+#endif
 
        /* device_pointers: pointers to ucode event tables */
        struct {
index c48907c8ab4336db00b1719e86b40d5051f33425..352c6cb7b4f17d5b85facf619839f93beaec5ade 100644 (file)
@@ -174,10 +174,13 @@ static void iwl1000_hw_set_hw_params(struct iwl_priv *priv)
        priv->hw_params.sens = &iwl1000_sensitivity;
 }
 
-struct iwl_lib_ops iwl1000_lib = {
+const struct iwl_dvm_cfg iwl_dvm_1000_cfg = {
        .set_hw_params = iwl1000_hw_set_hw_params,
        .nic_config = iwl1000_nic_config,
        .temperature = iwlagn_temperature,
+       .support_ct_kill_exit = true,
+       .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
+       .chain_noise_scale = 1000,
 };
 
 
@@ -232,16 +235,56 @@ static void iwl2000_hw_set_hw_params(struct iwl_priv *priv)
        priv->hw_params.sens = &iwl2000_sensitivity;
 }
 
-struct iwl_lib_ops iwl2000_lib = {
+const struct iwl_dvm_cfg iwl_dvm_2000_cfg = {
        .set_hw_params = iwl2000_hw_set_hw_params,
        .nic_config = iwl2000_nic_config,
        .temperature = iwlagn_temperature,
+       .adv_thermal_throttle = true,
+       .support_ct_kill_exit = true,
+       .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+       .chain_noise_scale = 1000,
+       .hd_v2 = true,
+       .need_temp_offset_calib = true,
+       .temp_offset_v2 = true,
 };
 
-struct iwl_lib_ops iwl2030_lib = {
+const struct iwl_dvm_cfg iwl_dvm_105_cfg = {
        .set_hw_params = iwl2000_hw_set_hw_params,
        .nic_config = iwl2000_nic_config,
        .temperature = iwlagn_temperature,
+       .adv_thermal_throttle = true,
+       .support_ct_kill_exit = true,
+       .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+       .chain_noise_scale = 1000,
+       .hd_v2 = true,
+       .need_temp_offset_calib = true,
+       .temp_offset_v2 = true,
+       .adv_pm = true,
+};
+
+static const struct iwl_dvm_bt_params iwl2030_bt_params = {
+       /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
+       .advanced_bt_coexist = true,
+       .agg_time_limit = BT_AGG_THRESHOLD_DEF,
+       .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
+       .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT32,
+       .bt_sco_disable = true,
+       .bt_session_2 = true,
+};
+
+const struct iwl_dvm_cfg iwl_dvm_2030_cfg = {
+       .set_hw_params = iwl2000_hw_set_hw_params,
+       .nic_config = iwl2000_nic_config,
+       .temperature = iwlagn_temperature,
+       .adv_thermal_throttle = true,
+       .support_ct_kill_exit = true,
+       .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+       .chain_noise_scale = 1000,
+       .hd_v2 = true,
+       .bt_params = &iwl2030_bt_params,
+       .need_temp_offset_calib = true,
+       .temp_offset_v2 = true,
+       .adv_pm = true,
 };
 
 /*
@@ -420,16 +463,23 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
        return iwl_dvm_send_cmd(priv, &hcmd);
 }
 
-struct iwl_lib_ops iwl5000_lib = {
+const struct iwl_dvm_cfg iwl_dvm_5000_cfg = {
        .set_hw_params = iwl5000_hw_set_hw_params,
        .set_channel_switch = iwl5000_hw_channel_switch,
        .temperature = iwlagn_temperature,
+       .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+       .chain_noise_scale = 1000,
+       .no_idle_support = true,
 };
 
-struct iwl_lib_ops iwl5150_lib = {
+const struct iwl_dvm_cfg iwl_dvm_5150_cfg = {
        .set_hw_params = iwl5150_hw_set_hw_params,
        .set_channel_switch = iwl5000_hw_channel_switch,
        .temperature = iwl5150_temperature,
+       .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+       .chain_noise_scale = 1000,
+       .no_idle_support = true,
+       .no_xtal_calib = true,
 };
 
 
@@ -584,16 +634,59 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
        return err;
 }
 
-struct iwl_lib_ops iwl6000_lib = {
+const struct iwl_dvm_cfg iwl_dvm_6000_cfg = {
        .set_hw_params = iwl6000_hw_set_hw_params,
        .set_channel_switch = iwl6000_hw_channel_switch,
        .nic_config = iwl6000_nic_config,
        .temperature = iwlagn_temperature,
+       .adv_thermal_throttle = true,
+       .support_ct_kill_exit = true,
+       .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+       .chain_noise_scale = 1000,
+};
+
+const struct iwl_dvm_cfg iwl_dvm_6005_cfg = {
+       .set_hw_params = iwl6000_hw_set_hw_params,
+       .set_channel_switch = iwl6000_hw_channel_switch,
+       .nic_config = iwl6000_nic_config,
+       .temperature = iwlagn_temperature,
+       .adv_thermal_throttle = true,
+       .support_ct_kill_exit = true,
+       .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+       .chain_noise_scale = 1000,
+       .need_temp_offset_calib = true,
+};
+
+const struct iwl_dvm_cfg iwl_dvm_6050_cfg = {
+       .set_hw_params = iwl6000_hw_set_hw_params,
+       .set_channel_switch = iwl6000_hw_channel_switch,
+       .nic_config = iwl6000_nic_config,
+       .temperature = iwlagn_temperature,
+       .adv_thermal_throttle = true,
+       .support_ct_kill_exit = true,
+       .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+       .chain_noise_scale = 1500,
+};
+
+static const struct iwl_dvm_bt_params iwl6000_bt_params = {
+       /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
+       .advanced_bt_coexist = true,
+       .agg_time_limit = BT_AGG_THRESHOLD_DEF,
+       .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
+       .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
+       .bt_sco_disable = true,
 };
 
-struct iwl_lib_ops iwl6030_lib = {
+const struct iwl_dvm_cfg iwl_dvm_6030_cfg = {
        .set_hw_params = iwl6000_hw_set_hw_params,
        .set_channel_switch = iwl6000_hw_channel_switch,
        .nic_config = iwl6000_nic_config,
        .temperature = iwlagn_temperature,
+       .adv_thermal_throttle = true,
+       .support_ct_kill_exit = true,
+       .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+       .chain_noise_scale = 1000,
+       .bt_params = &iwl6000_bt_params,
+       .need_temp_offset_calib = true,
+       .adv_pm = true,
 };
index 54f553380aa88db23af690690d98fd8f1fd51792..9879550a0feaf50f5e543384eede4ea366615579 100644 (file)
@@ -254,23 +254,23 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
        BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) !=
                        sizeof(basic.bt3_lookup_table));
 
-       if (priv->cfg->bt_params) {
+       if (priv->lib->bt_params) {
                /*
                 * newer generation of devices (2000 series and newer)
                 * use the version 2 of the bt command
                 * we need to make sure sending the host command
                 * with correct data structure to avoid uCode assert
                 */
-               if (priv->cfg->bt_params->bt_session_2) {
+               if (priv->lib->bt_params->bt_session_2) {
                        bt_cmd_v2.prio_boost = cpu_to_le32(
-                               priv->cfg->bt_params->bt_prio_boost);
+                               priv->lib->bt_params->bt_prio_boost);
                        bt_cmd_v2.tx_prio_boost = 0;
                        bt_cmd_v2.rx_prio_boost = 0;
                } else {
                        /* older version only has 8 bits */
-                       WARN_ON(priv->cfg->bt_params->bt_prio_boost & ~0xFF);
+                       WARN_ON(priv->lib->bt_params->bt_prio_boost & ~0xFF);
                        bt_cmd_v1.prio_boost =
-                               priv->cfg->bt_params->bt_prio_boost;
+                               priv->lib->bt_params->bt_prio_boost;
                        bt_cmd_v1.tx_prio_boost = 0;
                        bt_cmd_v1.rx_prio_boost = 0;
                }
@@ -330,7 +330,7 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
                       priv->bt_full_concurrent ?
                       "full concurrency" : "3-wire");
 
-       if (priv->cfg->bt_params->bt_session_2) {
+       if (priv->lib->bt_params->bt_session_2) {
                memcpy(&bt_cmd_v2.basic, &basic,
                        sizeof(basic));
                ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
@@ -758,8 +758,8 @@ static bool is_single_rx_stream(struct iwl_priv *priv)
  */
 static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
 {
-       if (priv->cfg->bt_params &&
-           priv->cfg->bt_params->advanced_bt_coexist &&
+       if (priv->lib->bt_params &&
+           priv->lib->bt_params->advanced_bt_coexist &&
            (priv->bt_full_concurrent ||
             priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
                /*
@@ -830,8 +830,8 @@ void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
        else
                active_chains = priv->nvm_data->valid_rx_ant;
 
-       if (priv->cfg->bt_params &&
-           priv->cfg->bt_params->advanced_bt_coexist &&
+       if (priv->lib->bt_params &&
+           priv->lib->bt_params->advanced_bt_coexist &&
            (priv->bt_full_concurrent ||
             priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
                /*
index cab23af0be9e604051e0c2a5d4b8b0a87073a7ca..eef64bb854f723ebaf744597d0e90461ef0e5ddc 100644 (file)
@@ -208,20 +208,21 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
            priv->trans->ops->d3_suspend &&
            priv->trans->ops->d3_resume &&
            device_can_wakeup(priv->trans->dev)) {
-               hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
-                                         WIPHY_WOWLAN_DISCONNECT |
-                                         WIPHY_WOWLAN_EAP_IDENTITY_REQ |
-                                         WIPHY_WOWLAN_RFKILL_RELEASE;
+               priv->wowlan_support.flags = WIPHY_WOWLAN_MAGIC_PKT |
+                                            WIPHY_WOWLAN_DISCONNECT |
+                                            WIPHY_WOWLAN_EAP_IDENTITY_REQ |
+                                            WIPHY_WOWLAN_RFKILL_RELEASE;
                if (!iwlwifi_mod_params.sw_crypto)
-                       hw->wiphy->wowlan.flags |=
+                       priv->wowlan_support.flags |=
                                WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
                                WIPHY_WOWLAN_GTK_REKEY_FAILURE;
 
-               hw->wiphy->wowlan.n_patterns = IWLAGN_WOWLAN_MAX_PATTERNS;
-               hw->wiphy->wowlan.pattern_min_len =
+               priv->wowlan_support.n_patterns = IWLAGN_WOWLAN_MAX_PATTERNS;
+               priv->wowlan_support.pattern_min_len =
                                        IWLAGN_WOWLAN_MIN_PATTERN_LEN;
-               hw->wiphy->wowlan.pattern_max_len =
+               priv->wowlan_support.pattern_max_len =
                                        IWLAGN_WOWLAN_MAX_PATTERN_LEN;
+               hw->wiphy->wowlan = &priv->wowlan_support;
        }
 #endif
 
@@ -426,7 +427,11 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
        if (ret)
                goto error;
 
-       iwl_trans_d3_suspend(priv->trans);
+       /* let the ucode operate on its own */
+       iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_SET,
+                   CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
+
+       iwl_trans_d3_suspend(priv->trans, false);
 
        goto out;
 
@@ -500,7 +505,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
        /* we'll clear ctx->vif during iwlagn_prepare_restart() */
        vif = ctx->vif;
 
-       ret = iwl_trans_d3_resume(priv->trans, &d3_status);
+       ret = iwl_trans_d3_resume(priv->trans, &d3_status, false);
        if (ret)
                goto out_unlock;
 
@@ -509,6 +514,10 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
                goto out_unlock;
        }
 
+       /* uCode is no longer operating by itself */
+       iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR,
+                   CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
+
        base = priv->device_pointers.error_event_table;
        if (!iwlagn_hw_valid_rtc_data_addr(base)) {
                IWL_WARN(priv, "Invalid error table during resume!\n");
@@ -1276,8 +1285,8 @@ static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
        IWL_DEBUG_MAC80211(priv, "enter\n");
        mutex_lock(&priv->mutex);
 
-       if (priv->cfg->bt_params &&
-                       priv->cfg->bt_params->advanced_bt_coexist) {
+       if (priv->lib->bt_params &&
+           priv->lib->bt_params->advanced_bt_coexist) {
                if (rssi_event == RSSI_EVENT_LOW)
                        priv->bt_enable_pspoll = true;
                else if (rssi_event == RSSI_EVENT_HIGH)
@@ -1387,7 +1396,7 @@ static int iwl_setup_interface(struct iwl_priv *priv,
                return err;
        }
 
-       if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist &&
+       if (priv->lib->bt_params && priv->lib->bt_params->advanced_bt_coexist &&
            vif->type == NL80211_IFTYPE_ADHOC) {
                /*
                 * pretend to have high BT traffic as long as we
index 74d7572e709125160f0a0db02b8e337d23d96cc5..7aa9c8dc33ef6a51a7a64f60fa8c76130690393a 100644 (file)
@@ -615,7 +615,7 @@ static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
 
        priv->thermal_throttle.ct_kill_toggle = false;
 
-       if (priv->cfg->base_params->support_ct_kill_exit) {
+       if (priv->lib->support_ct_kill_exit) {
                adv_cmd.critical_temperature_enter =
                        cpu_to_le32(priv->hw_params.ct_kill_threshold);
                adv_cmd.critical_temperature_exit =
@@ -732,10 +732,10 @@ int iwl_alive_start(struct iwl_priv *priv)
        }
 
        /* download priority table before any calibration request */
-       if (priv->cfg->bt_params &&
-           priv->cfg->bt_params->advanced_bt_coexist) {
+       if (priv->lib->bt_params &&
+           priv->lib->bt_params->advanced_bt_coexist) {
                /* Configure Bluetooth device coexistence support */
-               if (priv->cfg->bt_params->bt_sco_disable)
+               if (priv->lib->bt_params->bt_sco_disable)
                        priv->bt_enable_pspoll = false;
                else
                        priv->bt_enable_pspoll = true;
@@ -873,9 +873,9 @@ void iwl_down(struct iwl_priv *priv)
        priv->bt_status = 0;
        priv->cur_rssi_ctx = NULL;
        priv->bt_is_sco = 0;
-       if (priv->cfg->bt_params)
+       if (priv->lib->bt_params)
                priv->bt_traffic_load =
-                        priv->cfg->bt_params->bt_init_traffic_load;
+                        priv->lib->bt_params->bt_init_traffic_load;
        else
                priv->bt_traffic_load = 0;
        priv->bt_full_concurrent = false;
@@ -1058,7 +1058,7 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
 
        iwl_setup_scan_deferred_work(priv);
 
-       if (priv->cfg->bt_params)
+       if (priv->lib->bt_params)
                iwlagn_bt_setup_deferred_work(priv);
 
        init_timer(&priv->statistics_periodic);
@@ -1072,7 +1072,7 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
 
 void iwl_cancel_deferred_work(struct iwl_priv *priv)
 {
-       if (priv->cfg->bt_params)
+       if (priv->lib->bt_params)
                iwlagn_bt_cancel_deferred_work(priv);
 
        cancel_work_sync(&priv->run_time_calib_work);
@@ -1098,8 +1098,7 @@ static int iwl_init_drv(struct iwl_priv *priv)
 
        priv->band = IEEE80211_BAND_2GHZ;
 
-       priv->plcp_delta_threshold =
-               priv->cfg->base_params->plcp_delta_threshold;
+       priv->plcp_delta_threshold = priv->lib->plcp_delta_threshold;
 
        priv->iw_mode = NL80211_IFTYPE_STATION;
        priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
@@ -1116,8 +1115,8 @@ static int iwl_init_drv(struct iwl_priv *priv)
        iwl_init_scan_params(priv);
 
        /* init bt coex */
-       if (priv->cfg->bt_params &&
-           priv->cfg->bt_params->advanced_bt_coexist) {
+       if (priv->lib->bt_params &&
+           priv->lib->bt_params->advanced_bt_coexist) {
                priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT;
                priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT;
                priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK;
@@ -1264,31 +1263,37 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
        switch (priv->cfg->device_family) {
        case IWL_DEVICE_FAMILY_1000:
        case IWL_DEVICE_FAMILY_100:
-               priv->lib = &iwl1000_lib;
+               priv->lib = &iwl_dvm_1000_cfg;
                break;
        case IWL_DEVICE_FAMILY_2000:
+               priv->lib = &iwl_dvm_2000_cfg;
+               break;
        case IWL_DEVICE_FAMILY_105:
-               priv->lib = &iwl2000_lib;
+               priv->lib = &iwl_dvm_105_cfg;
                break;
        case IWL_DEVICE_FAMILY_2030:
        case IWL_DEVICE_FAMILY_135:
-               priv->lib = &iwl2030_lib;
+               priv->lib = &iwl_dvm_2030_cfg;
                break;
        case IWL_DEVICE_FAMILY_5000:
-               priv->lib = &iwl5000_lib;
+               priv->lib = &iwl_dvm_5000_cfg;
                break;
        case IWL_DEVICE_FAMILY_5150:
-               priv->lib = &iwl5150_lib;
+               priv->lib = &iwl_dvm_5150_cfg;
                break;
        case IWL_DEVICE_FAMILY_6000:
-       case IWL_DEVICE_FAMILY_6005:
        case IWL_DEVICE_FAMILY_6000i:
+               priv->lib = &iwl_dvm_6000_cfg;
+               break;
+       case IWL_DEVICE_FAMILY_6005:
+               priv->lib = &iwl_dvm_6005_cfg;
+               break;
        case IWL_DEVICE_FAMILY_6050:
        case IWL_DEVICE_FAMILY_6150:
-               priv->lib = &iwl6000_lib;
+               priv->lib = &iwl_dvm_6050_cfg;
                break;
        case IWL_DEVICE_FAMILY_6030:
-               priv->lib = &iwl6030_lib;
+               priv->lib = &iwl_dvm_6030_cfg;
                break;
        default:
                break;
@@ -1854,14 +1859,9 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
                return pos;
        }
 
-#ifdef CONFIG_IWLWIFI_DEBUG
        if (!(iwl_have_debug_level(IWL_DL_FW_ERRORS)) && !full_log)
                size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
                        ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
-#else
-       size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
-               ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
-#endif
        IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n",
                size);
 
@@ -1905,10 +1905,8 @@ static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
        unsigned int reload_msec;
        unsigned long reload_jiffies;
 
-#ifdef CONFIG_IWLWIFI_DEBUG
        if (iwl_have_debug_level(IWL_DL_FW_ERRORS))
                iwl_print_rx_config_cmd(priv, IWL_RXON_CTX_BSS);
-#endif
 
        /* uCode is no longer loaded. */
        priv->ucode_loaded = false;
index bd69018d07a95f03b0da607548b2d0b97f677e3e..77cb59712235035c689034575bfd58794b933245 100644 (file)
@@ -163,7 +163,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
        u8 skip;
        u32 slp_itrvl;
 
-       if (priv->cfg->adv_pm) {
+       if (priv->lib->adv_pm) {
                table = apm_range_2;
                if (period <= IWL_DTIM_RANGE_1_MAX)
                        table = apm_range_1;
@@ -217,7 +217,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
                cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
 
        if (iwl_advanced_bt_coexist(priv)) {
-               if (!priv->cfg->bt_params->bt_sco_disable)
+               if (!priv->lib->bt_params->bt_sco_disable)
                        cmd->flags |= IWL_POWER_BT_SCO_ENA;
                else
                        cmd->flags &= ~IWL_POWER_BT_SCO_ENA;
@@ -293,7 +293,7 @@ static void iwl_power_build_cmd(struct iwl_priv *priv,
 
        if (priv->wowlan)
                iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper);
-       else if (!priv->cfg->base_params->no_idle_support &&
+       else if (!priv->lib->no_idle_support &&
                 priv->hw->conf.flags & IEEE80211_CONF_IDLE)
                iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20);
        else if (iwl_tt_is_low_power_state(priv)) {
index 10fbb176cc8e1d8ca259a02dfaf9c83b0bea2514..8fe76dc4f373e8a0724e049a6fa320fc159c0382 100644 (file)
@@ -1088,7 +1088,7 @@ done:
            (priv->tm_fixed_rate != lq_sta->dbg_fixed_rate))
                rs_program_fix_rate(priv, lq_sta);
 #endif
-       if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist)
+       if (priv->lib->bt_params && priv->lib->bt_params->advanced_bt_coexist)
                rs_bt_update_lq(priv, ctx, lq_sta);
 }
 
@@ -3064,11 +3064,11 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
         * overwrite if needed, pass aggregation time limit
         * to uCode in uSec
         */
-       if (priv && priv->cfg->bt_params &&
-           priv->cfg->bt_params->agg_time_limit &&
+       if (priv && priv->lib->bt_params &&
+           priv->lib->bt_params->agg_time_limit &&
            priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
                lq_cmd->agg_params.agg_time_limit =
-                       cpu_to_le16(priv->cfg->bt_params->agg_time_limit);
+                       cpu_to_le16(priv->lib->bt_params->agg_time_limit);
 }
 
 static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
index a4eed2055fdbe590f1d8325fffcd84d171aff19d..2f3fd160ab449c36e06d3172d08a253809ce41e8 100644 (file)
@@ -1102,7 +1102,7 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv)
        iwl_notification_wait_init(&priv->notif_wait);
 
        /* Set up BT Rx handlers */
-       if (priv->cfg->bt_params)
+       if (priv->lib->bt_params)
                iwlagn_bt_rx_handler_setup(priv);
 }
 
index d69b55866714d9936b34bde9b53f176191278341..8c686a5b90ac6657ad3e5e4126c61fba67726654 100644 (file)
@@ -801,8 +801,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
                 * Internal scans are passive, so we can indiscriminately set
                 * the BT ignore flag on 2.4 GHz since it applies to TX only.
                 */
-               if (priv->cfg->bt_params &&
-                   priv->cfg->bt_params->advanced_bt_coexist)
+               if (priv->lib->bt_params &&
+                   priv->lib->bt_params->advanced_bt_coexist)
                        scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT;
                break;
        case IEEE80211_BAND_5GHZ:
@@ -844,8 +844,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
        band = priv->scan_band;
 
        if (band == IEEE80211_BAND_2GHZ &&
-           priv->cfg->bt_params &&
-           priv->cfg->bt_params->advanced_bt_coexist) {
+           priv->lib->bt_params &&
+           priv->lib->bt_params->advanced_bt_coexist) {
                /* transmit 2.4 GHz probes only on first antenna */
                scan_tx_antennas = first_antenna(scan_tx_antennas);
        }
@@ -873,8 +873,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 
                rx_ant = first_antenna(active_chains);
        }
-       if (priv->cfg->bt_params &&
-           priv->cfg->bt_params->advanced_bt_coexist &&
+       if (priv->lib->bt_params &&
+           priv->lib->bt_params->advanced_bt_coexist &&
            priv->bt_full_concurrent) {
                /* operated as 1x1 in full concurrency mode */
                rx_ant = first_antenna(rx_ant);
index 03f9bc01c0ccff9a53c020a1256f9b2c1a393be5..fbeee081ee2fccde51f0a227f6d9053f472722af 100644 (file)
@@ -627,7 +627,7 @@ void iwl_tt_initialize(struct iwl_priv *priv)
        INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter);
        INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit);
 
-       if (priv->cfg->base_params->adv_thermal_throttle) {
+       if (priv->lib->adv_thermal_throttle) {
                IWL_DEBUG_TEMP(priv, "Advanced Thermal Throttling\n");
                tt->restriction = kcalloc(IWL_TI_STATE_MAX,
                                          sizeof(struct iwl_tt_restriction),
index a900aaf477901ed033b826b96a9b247012f0a928..353a053b4eb1b475cf823280b40677421bc84372 100644 (file)
@@ -83,8 +83,8 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
        else if (ieee80211_is_back_req(fc))
                tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
        else if (info->band == IEEE80211_BAND_2GHZ &&
-                priv->cfg->bt_params &&
-                priv->cfg->bt_params->advanced_bt_coexist &&
+                priv->lib->bt_params &&
+                priv->lib->bt_params->advanced_bt_coexist &&
                 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
                 ieee80211_is_reassoc_req(fc) ||
                 skb->protocol == cpu_to_be16(ETH_P_PAE)))
@@ -202,8 +202,8 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
                rate_flags |= RATE_MCS_CCK_MSK;
 
        /* Set up antennas */
-        if (priv->cfg->bt_params &&
-            priv->cfg->bt_params->advanced_bt_coexist &&
+        if (priv->lib->bt_params &&
+            priv->lib->bt_params->advanced_bt_coexist &&
             priv->bt_full_concurrent) {
                /* operated as 1x1 in full concurrency mode */
                priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
@@ -986,8 +986,8 @@ static void iwl_rx_reply_tx_agg(struct iwl_priv *priv,
         * notification again.
         */
        if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
-           priv->cfg->bt_params &&
-           priv->cfg->bt_params->advanced_bt_coexist) {
+           priv->lib->bt_params &&
+           priv->lib->bt_params->advanced_bt_coexist) {
                IWL_DEBUG_COEX(priv, "receive reply tx w/ bt_kill\n");
        }
 
index 0a1cdc5e856baa6f37966cac3021d9db2558c6a9..86270b69cd024c3e9fe88f6361b15acc1f77eb39 100644 (file)
@@ -132,8 +132,8 @@ int iwl_init_alive_start(struct iwl_priv *priv)
 {
        int ret;
 
-       if (priv->cfg->bt_params &&
-           priv->cfg->bt_params->advanced_bt_coexist) {
+       if (priv->lib->bt_params &&
+           priv->lib->bt_params->advanced_bt_coexist) {
                /*
                 * Tell uCode we are ready to perform calibration
                 * need to perform this before any calibration
@@ -155,8 +155,8 @@ int iwl_init_alive_start(struct iwl_priv *priv)
         * temperature offset calibration is only needed for runtime ucode,
         * so prepare the value now.
         */
-       if (priv->cfg->need_temp_offset_calib) {
-               if (priv->cfg->temp_offset_v2)
+       if (priv->lib->need_temp_offset_calib) {
+               if (priv->lib->temp_offset_v2)
                        return iwl_set_temperature_offset_calib_v2(priv);
                else
                        return iwl_set_temperature_offset_calib(priv);
@@ -277,7 +277,7 @@ static int iwl_alive_notify(struct iwl_priv *priv)
        if (ret)
                return ret;
 
-       if (!priv->cfg->no_xtal_calib) {
+       if (!priv->lib->no_xtal_calib) {
                ret = iwl_set_Xtal_calib(priv);
                if (ret)
                        return ret;
index c080ae3070b2f650c3a451ee68eb63424c8f8155..0d2afe098afce81d1000d658bee9f788b632f797 100644 (file)
@@ -60,9 +60,6 @@ static const struct iwl_base_params iwl1000_base_params = {
        .max_ll_items = OTP_MAX_LL_ITEMS_1000,
        .shadow_ram_support = false,
        .led_compensation = 51,
-       .support_ct_kill_exit = true,
-       .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
-       .chain_noise_scale = 1000,
        .wd_timeout = IWL_WATCHDOG_DISABLED,
        .max_event_log_size = 128,
 };
index a6ddd2f9fba0501acfa4ddcf11edc08d4e545ffb..c727ec7c90a65ec1f760c6e0cc53b3af026850e4 100644 (file)
@@ -72,14 +72,9 @@ static const struct iwl_base_params iwl2000_base_params = {
        .max_ll_items = OTP_MAX_LL_ITEMS_2x00,
        .shadow_ram_support = true,
        .led_compensation = 51,
-       .adv_thermal_throttle = true,
-       .support_ct_kill_exit = true,
-       .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
-       .chain_noise_scale = 1000,
        .wd_timeout = IWL_DEF_WD_TIMEOUT,
        .max_event_log_size = 512,
        .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
-       .hd_v2 = true,
 };
 
 
@@ -90,14 +85,9 @@ static const struct iwl_base_params iwl2030_base_params = {
        .max_ll_items = OTP_MAX_LL_ITEMS_2x00,
        .shadow_ram_support = true,
        .led_compensation = 57,
-       .adv_thermal_throttle = true,
-       .support_ct_kill_exit = true,
-       .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
-       .chain_noise_scale = 1000,
        .wd_timeout = IWL_LONG_WD_TIMEOUT,
        .max_event_log_size = 512,
        .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
-       .hd_v2 = true,
 };
 
 static const struct iwl_ht_params iwl2000_ht_params = {
@@ -106,16 +96,6 @@ static const struct iwl_ht_params iwl2000_ht_params = {
        .ht40_bands = BIT(IEEE80211_BAND_2GHZ),
 };
 
-static const struct iwl_bt_params iwl2030_bt_params = {
-       /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
-       .advanced_bt_coexist = true,
-       .agg_time_limit = BT_AGG_THRESHOLD_DEF,
-       .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
-       .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT32,
-       .bt_sco_disable = true,
-       .bt_session_2 = true,
-};
-
 static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
        .regulatory_bands = {
                EEPROM_REG_BAND_1_CHANNELS,
@@ -137,12 +117,10 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
        .device_family = IWL_DEVICE_FAMILY_2000,                \
        .max_inst_size = IWL60_RTC_INST_SIZE,                   \
        .max_data_size = IWL60_RTC_DATA_SIZE,                   \
-       .nvm_ver = EEPROM_2000_EEPROM_VERSION,          \
-       .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION,  \
+       .nvm_ver = EEPROM_2000_EEPROM_VERSION,                  \
+       .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION,          \
        .base_params = &iwl2000_base_params,                    \
        .eeprom_params = &iwl20x0_eeprom_params,                \
-       .need_temp_offset_calib = true,                         \
-       .temp_offset_v2 = true,                                 \
        .led_mode = IWL_LED_RF_STATE
 
 const struct iwl_cfg iwl2000_2bgn_cfg = {
@@ -168,12 +146,8 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = {
        .nvm_ver = EEPROM_2000_EEPROM_VERSION,          \
        .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION,  \
        .base_params = &iwl2030_base_params,                    \
-       .bt_params = &iwl2030_bt_params,                        \
        .eeprom_params = &iwl20x0_eeprom_params,                \
-       .need_temp_offset_calib = true,                         \
-       .temp_offset_v2 = true,                                 \
-       .led_mode = IWL_LED_RF_STATE,                           \
-       .adv_pm = true
+       .led_mode = IWL_LED_RF_STATE
 
 const struct iwl_cfg iwl2030_2bgn_cfg = {
        .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN",
@@ -193,10 +167,7 @@ const struct iwl_cfg iwl2030_2bgn_cfg = {
        .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION,  \
        .base_params = &iwl2000_base_params,                    \
        .eeprom_params = &iwl20x0_eeprom_params,                \
-       .need_temp_offset_calib = true,                         \
-       .temp_offset_v2 = true,                                 \
        .led_mode = IWL_LED_RF_STATE,                           \
-       .adv_pm = true,                                         \
        .rx_with_siso_diversity = true
 
 const struct iwl_cfg iwl105_bgn_cfg = {
@@ -222,12 +193,8 @@ const struct iwl_cfg iwl105_bgn_d_cfg = {
        .nvm_ver = EEPROM_2000_EEPROM_VERSION,          \
        .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION,  \
        .base_params = &iwl2030_base_params,                    \
-       .bt_params = &iwl2030_bt_params,                        \
        .eeprom_params = &iwl20x0_eeprom_params,                \
-       .need_temp_offset_calib = true,                         \
-       .temp_offset_v2 = true,                                 \
        .led_mode = IWL_LED_RF_STATE,                           \
-       .adv_pm = true,                                         \
        .rx_with_siso_diversity = true
 
 const struct iwl_cfg iwl135_bgn_cfg = {
index 403f3f224bf6e9c3891092d01cb9ac6882af62ba..ecc01e1a61a1d14107ca523c6fd7339fe380c5d8 100644 (file)
@@ -59,11 +59,8 @@ static const struct iwl_base_params iwl5000_base_params = {
        .num_of_queues = IWLAGN_NUM_QUEUES,
        .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
        .led_compensation = 51,
-       .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
-       .chain_noise_scale = 1000,
        .wd_timeout = IWL_WATCHDOG_DISABLED,
        .max_event_log_size = 512,
-       .no_idle_support = true,
 };
 
 static const struct iwl_ht_params iwl5000_ht_params = {
@@ -159,7 +156,6 @@ const struct iwl_cfg iwl5350_agn_cfg = {
        .nvm_calib_ver = EEPROM_5050_TX_POWER_VERSION,  \
        .base_params = &iwl5000_base_params,                    \
        .eeprom_params = &iwl5000_eeprom_params,                \
-       .no_xtal_calib = true,                                  \
        .led_mode = IWL_LED_BLINK,                              \
        .internal_wimax_coex = true
 
index b5ab8d1bcac0222a200eace4a24df70b15e9d008..30d45e2fc193a5bc04987acbdef9b0252a2b5c01 100644 (file)
@@ -82,10 +82,6 @@ static const struct iwl_base_params iwl6000_base_params = {
        .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
        .shadow_ram_support = true,
        .led_compensation = 51,
-       .adv_thermal_throttle = true,
-       .support_ct_kill_exit = true,
-       .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
-       .chain_noise_scale = 1000,
        .wd_timeout = IWL_DEF_WD_TIMEOUT,
        .max_event_log_size = 512,
        .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
@@ -98,10 +94,6 @@ static const struct iwl_base_params iwl6050_base_params = {
        .max_ll_items = OTP_MAX_LL_ITEMS_6x50,
        .shadow_ram_support = true,
        .led_compensation = 51,
-       .adv_thermal_throttle = true,
-       .support_ct_kill_exit = true,
-       .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
-       .chain_noise_scale = 1500,
        .wd_timeout = IWL_DEF_WD_TIMEOUT,
        .max_event_log_size = 1024,
        .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
@@ -114,10 +106,6 @@ static const struct iwl_base_params iwl6000_g2_base_params = {
        .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
        .shadow_ram_support = true,
        .led_compensation = 57,
-       .adv_thermal_throttle = true,
-       .support_ct_kill_exit = true,
-       .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
-       .chain_noise_scale = 1000,
        .wd_timeout = IWL_LONG_WD_TIMEOUT,
        .max_event_log_size = 512,
        .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
@@ -129,15 +117,6 @@ static const struct iwl_ht_params iwl6000_ht_params = {
        .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
 };
 
-static const struct iwl_bt_params iwl6000_bt_params = {
-       /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
-       .advanced_bt_coexist = true,
-       .agg_time_limit = BT_AGG_THRESHOLD_DEF,
-       .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
-       .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
-       .bt_sco_disable = true,
-};
-
 static const struct iwl_eeprom_params iwl6000_eeprom_params = {
        .regulatory_bands = {
                EEPROM_REG_BAND_1_CHANNELS,
@@ -163,7 +142,6 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = {
        .nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION,  \
        .base_params = &iwl6000_g2_base_params,                 \
        .eeprom_params = &iwl6000_eeprom_params,                \
-       .need_temp_offset_calib = true,                         \
        .led_mode = IWL_LED_RF_STATE
 
 const struct iwl_cfg iwl6005_2agn_cfg = {
@@ -217,11 +195,8 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
        .nvm_ver = EEPROM_6030_EEPROM_VERSION,          \
        .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION,  \
        .base_params = &iwl6000_g2_base_params,                 \
-       .bt_params = &iwl6000_bt_params,                        \
        .eeprom_params = &iwl6000_eeprom_params,                \
-       .need_temp_offset_calib = true,                         \
-       .led_mode = IWL_LED_RF_STATE,                           \
-       .adv_pm = true                                          \
+       .led_mode = IWL_LED_RF_STATE
 
 const struct iwl_cfg iwl6030_2agn_cfg = {
        .name = "Intel(R) Centrino(R) Advanced-N 6230 AGN",
@@ -256,11 +231,8 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
        .nvm_ver = EEPROM_6030_EEPROM_VERSION,          \
        .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION,  \
        .base_params = &iwl6000_g2_base_params,                 \
-       .bt_params = &iwl6000_bt_params,                        \
        .eeprom_params = &iwl6000_eeprom_params,                \
-       .need_temp_offset_calib = true,                         \
-       .led_mode = IWL_LED_RF_STATE,                           \
-       .adv_pm = true
+       .led_mode = IWL_LED_RF_STATE
 
 const struct iwl_cfg iwl6035_2agn_cfg = {
        .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
index 50263e87fe156509d4e1c0328cb12f5bdec993a8..d4f3b4864ab147d953729d259310419a45626491 100644 (file)
@@ -96,13 +96,9 @@ static const struct iwl_base_params iwl7000_base_params = {
        .pll_cfg_val = 0,
        .shadow_ram_support = true,
        .led_compensation = 57,
-       .adv_thermal_throttle = true,
-       .support_ct_kill_exit = true,
-       .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
-       .chain_noise_scale = 1000,
        .wd_timeout = IWL_LONG_WD_TIMEOUT,
        .max_event_log_size = 512,
-       .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+       .shadow_reg_enable = true,
 };
 
 static const struct iwl_ht_params iwl7000_ht_params = {
@@ -118,14 +114,11 @@ static const struct iwl_ht_params iwl7000_ht_params = {
        .max_inst_size = IWL60_RTC_INST_SIZE,                   \
        .max_data_size = IWL60_RTC_DATA_SIZE,                   \
        .base_params = &iwl7000_base_params,                    \
-       /* TODO: .bt_params? */                                 \
-       .need_temp_offset_calib = true,                         \
-       .led_mode = IWL_LED_RF_STATE,                           \
-       .adv_pm = true                                          \
+       .led_mode = IWL_LED_RF_STATE
 
 
 const struct iwl_cfg iwl7260_2ac_cfg = {
-       .name = "Intel(R) Dual Band Wireless AC7260",
+       .name = "Intel(R) Dual Band Wireless AC 7260",
        .fw_name_pre = IWL7260_FW_PRE,
        IWL_DEVICE_7000,
        .ht_params = &iwl7000_ht_params,
@@ -133,8 +126,44 @@ const struct iwl_cfg iwl7260_2ac_cfg = {
        .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
 };
 
-const struct iwl_cfg iwl3160_ac_cfg = {
-       .name = "Intel(R) Dual Band Wireless AC3160",
+const struct iwl_cfg iwl7260_2n_cfg = {
+       .name = "Intel(R) Dual Band Wireless N 7260",
+       .fw_name_pre = IWL7260_FW_PRE,
+       IWL_DEVICE_7000,
+       .ht_params = &iwl7000_ht_params,
+       .nvm_ver = IWL7260_NVM_VERSION,
+       .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
+};
+
+const struct iwl_cfg iwl7260_n_cfg = {
+       .name = "Intel(R) Wireless N 7260",
+       .fw_name_pre = IWL7260_FW_PRE,
+       IWL_DEVICE_7000,
+       .ht_params = &iwl7000_ht_params,
+       .nvm_ver = IWL7260_NVM_VERSION,
+       .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
+};
+
+const struct iwl_cfg iwl3160_2ac_cfg = {
+       .name = "Intel(R) Dual Band Wireless AC 3160",
+       .fw_name_pre = IWL3160_FW_PRE,
+       IWL_DEVICE_7000,
+       .ht_params = &iwl7000_ht_params,
+       .nvm_ver = IWL3160_NVM_VERSION,
+       .nvm_calib_ver = IWL3160_TX_POWER_VERSION,
+};
+
+const struct iwl_cfg iwl3160_2n_cfg = {
+       .name = "Intel(R) Dual Band Wireless N 3160",
+       .fw_name_pre = IWL3160_FW_PRE,
+       IWL_DEVICE_7000,
+       .ht_params = &iwl7000_ht_params,
+       .nvm_ver = IWL3160_NVM_VERSION,
+       .nvm_calib_ver = IWL3160_TX_POWER_VERSION,
+};
+
+const struct iwl_cfg iwl3160_n_cfg = {
+       .name = "Intel(R) Wireless N 3160",
        .fw_name_pre = IWL3160_FW_PRE,
        IWL_DEVICE_7000,
        .ht_params = &iwl7000_ht_params,
index c38aa8f775545b37b6b6ea215b435a63481c7717..0189b9050f22d5ec2720612b74d6b501cba0a89e 100644 (file)
@@ -136,17 +136,9 @@ enum iwl_led_mode {
  * @led_compensation: compensate on the led on/off time per HW according
  *     to the deviation to achieve the desired led frequency.
  *     The detailed algorithm is described in iwl-led.c
- * @chain_noise_num_beacons: number of beacons used to compute chain noise
- * @adv_thermal_throttle: support advance thermal throttle
- * @support_ct_kill_exit: support ct kill exit condition
- * @plcp_delta_threshold: plcp error rate threshold used to trigger
- *     radio tuning when there is a high receiving plcp error rate
- * @chain_noise_scale: default chain noise scale used for gain computation
  * @wd_timeout: TX queues watchdog timeout
  * @max_event_log_size: size of event log buffer size for ucode event logging
  * @shadow_reg_enable: HW shadow register support
- * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up
- * @no_idle_support: do not support idle mode
  */
 struct iwl_base_params {
        int eeprom_size;
@@ -157,31 +149,9 @@ struct iwl_base_params {
        const u16 max_ll_items;
        const bool shadow_ram_support;
        u16 led_compensation;
-       bool adv_thermal_throttle;
-       bool support_ct_kill_exit;
-       u8 plcp_delta_threshold;
-       s32 chain_noise_scale;
        unsigned int wd_timeout;
        u32 max_event_log_size;
        const bool shadow_reg_enable;
-       const bool hd_v2;
-       const bool no_idle_support;
-};
-
-/*
- * @advanced_bt_coexist: support advanced bt coexist
- * @bt_init_traffic_load: specify initial bt traffic load
- * @bt_prio_boost: default bt priority boost value
- * @agg_time_limit: maximum number of uSec in aggregation
- * @bt_sco_disable: uCode should not response to BT in SCO/ESCO mode
- */
-struct iwl_bt_params {
-       bool advanced_bt_coexist;
-       u8 bt_init_traffic_load;
-       u32 bt_prio_boost;
-       u16 agg_time_limit;
-       bool bt_sco_disable;
-       bool bt_session_2;
 };
 
 /*
@@ -231,16 +201,10 @@ struct iwl_eeprom_params {
  * @nvm_calib_ver: NVM calibration version
  * @lib: pointer to the lib ops
  * @base_params: pointer to basic parameters
- * @ht_params: point to ht patameters
- * @bt_params: pointer to bt parameters
- * @need_temp_offset_calib: need to perform temperature offset calibration
- * @no_xtal_calib: some devices do not need crystal calibration data,
- *     don't send it to those
+ * @ht_params: pointer to HT parameters
  * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
- * @adv_pm: advance power management
  * @rx_with_siso_diversity: 1x1 device with rx antenna diversity
  * @internal_wimax_coex: internal wifi/wimax combo device
- * @temp_offset_v2: support v2 of temperature offset calibration
  *
  * We enable the driver to be backward compatible wrt. hardware features.
  * API differences in uCode shouldn't be handled here but through TLVs
@@ -264,20 +228,16 @@ struct iwl_cfg {
        const struct iwl_base_params *base_params;
        /* params likely to change within a device family */
        const struct iwl_ht_params *ht_params;
-       const struct iwl_bt_params *bt_params;
        const struct iwl_eeprom_params *eeprom_params;
-       const bool need_temp_offset_calib; /* if used set to true */
-       const bool no_xtal_calib;
        enum iwl_led_mode led_mode;
-       const bool adv_pm;
        const bool rx_with_siso_diversity;
        const bool internal_wimax_coex;
-       const bool temp_offset_v2;
 };
 
 /*
  * This list declares the config structures for all devices.
  */
+#if IS_ENABLED(CONFIG_IWLDVM)
 extern const struct iwl_cfg iwl5300_agn_cfg;
 extern const struct iwl_cfg iwl5100_agn_cfg;
 extern const struct iwl_cfg iwl5350_agn_cfg;
@@ -319,7 +279,14 @@ extern const struct iwl_cfg iwl6035_2agn_cfg;
 extern const struct iwl_cfg iwl105_bgn_cfg;
 extern const struct iwl_cfg iwl105_bgn_d_cfg;
 extern const struct iwl_cfg iwl135_bgn_cfg;
+#endif /* CONFIG_IWLDVM */
+#if IS_ENABLED(CONFIG_IWLMVM)
 extern const struct iwl_cfg iwl7260_2ac_cfg;
-extern const struct iwl_cfg iwl3160_ac_cfg;
+extern const struct iwl_cfg iwl7260_2n_cfg;
+extern const struct iwl_cfg iwl7260_n_cfg;
+extern const struct iwl_cfg iwl3160_2ac_cfg;
+extern const struct iwl_cfg iwl3160_2n_cfg;
+extern const struct iwl_cfg iwl3160_n_cfg;
+#endif /* CONFIG_IWLMVM */
 
 #endif /* __IWL_CONFIG_H__ */
index 20e845d4da04304b0493b3d8c6fa3f29b333a427..a276af476e2d7ab64e859748c9b8fd48b84ebb80 100644 (file)
 #define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
 #define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
 
+/*****************************************************************************
+ *                        7000/3000 series SHR DTS addresses                 *
+ *****************************************************************************/
+
+/* Diode Results Register Structure: */
+enum dtd_diode_reg {
+       DTS_DIODE_REG_DIG_VAL                   = 0x000000FF, /* bits [7:0] */
+       DTS_DIODE_REG_VREF_LOW                  = 0x0000FF00, /* bits [15:8] */
+       DTS_DIODE_REG_VREF_HIGH                 = 0x00FF0000, /* bits [23:16] */
+       DTS_DIODE_REG_VREF_ID                   = 0x03000000, /* bits [25:24] */
+       DTS_DIODE_REG_PASS_ONCE                 = 0x80000000, /* bits [31:31] */
+       DTS_DIODE_REG_FLAGS_MSK                 = 0xFF000000, /* bits [31:24] */
+/* Those are the masks INSIDE the flags bit-field: */
+       DTS_DIODE_REG_FLAGS_VREFS_ID_POS        = 0,
+       DTS_DIODE_REG_FLAGS_VREFS_ID            = 0x00000003, /* bits [1:0] */
+       DTS_DIODE_REG_FLAGS_PASS_ONCE_POS       = 7,
+       DTS_DIODE_REG_FLAGS_PASS_ONCE           = 0x00000080, /* bits [7:7] */
+};
+
 #endif /* !__iwl_csr_h__ */
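
The new enum describes how a raw DTS diode register word is laid out: a digital value in bits [7:0], reference values above it, and a flags byte in bits [31:24] whose sub-fields are addressed by the nested _FLAGS_ masks and positions. A small user-space sketch of unpacking such a word with the masks shown above (the decode helper and the sample value are illustrative only, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Mask values copied from the enum above; the helper itself is an illustration. */
#define DTS_DIODE_REG_DIG_VAL			0x000000FF
#define DTS_DIODE_REG_VREF_LOW			0x0000FF00
#define DTS_DIODE_REG_FLAGS_MSK			0xFF000000
#define DTS_DIODE_REG_FLAGS_VREFS_ID		0x00000003
#define DTS_DIODE_REG_FLAGS_PASS_ONCE_POS	7

static void decode_dts_diode(uint32_t reg)
{
	unsigned int dig_val  = reg & DTS_DIODE_REG_DIG_VAL;
	unsigned int vref_low = (reg & DTS_DIODE_REG_VREF_LOW) >> 8;
	unsigned int flags    = (reg & DTS_DIODE_REG_FLAGS_MSK) >> 24;
	unsigned int vrefs_id = flags & DTS_DIODE_REG_FLAGS_VREFS_ID;
	unsigned int once     = (flags >> DTS_DIODE_REG_FLAGS_PASS_ONCE_POS) & 1;

	printf("dig=%u vref_low=%u vrefs_id=%u pass_once=%u\n",
	       dig_val, vref_low, vrefs_id, once);
}

int main(void)
{
	decode_dts_diode(0x83014a2c);	/* arbitrary sample word */
	return 0;
}
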
index 8cf5db7fb5c9ce309349add2dd2222df3c75ebe2..7edb8519c8a49eadc3f5798fa8fa08bfbd25fe04 100644 (file)
 
 static inline bool iwl_have_debug_level(u32 level)
 {
+#ifdef CONFIG_IWLWIFI_DEBUG
        return iwlwifi_mod_params.debug_level & level;
+#else
+       return false;
+#endif
 }
 
 void __iwl_err(struct device *dev, bool rfkill_prefix, bool only_trace,
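
With this change iwl_have_debug_level() compiles to a constant false when CONFIG_IWLWIFI_DEBUG is not set (the debug_level module parameter itself is hidden behind the same option later in this diff), so debug-only branches can be discarded at compile time. A toy example of the same pattern, using a made-up MY_DEBUG switch instead of the real Kconfig symbol:

#include <stdbool.h>
#include <stdio.h>

/* MY_DEBUG stands in for CONFIG_IWLWIFI_DEBUG; it is not the real symbol. */
#ifdef MY_DEBUG
static unsigned int debug_level = 0x1;	/* would come from a module parameter */

static inline bool have_debug_level(unsigned int level)
{
	return debug_level & level;
}
#else
static inline bool have_debug_level(unsigned int level)
{
	(void)level;
	return false;	/* constant, so debug-only branches can be optimized away */
}
#endif

int main(void)
{
	if (have_debug_level(0x1))
		printf("debug output enabled\n");
	else
		printf("debug output compiled out\n");
	return 0;
}
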
index 40fed1f511e23e546d64ec1d7c69b3c9f196a16a..2f690e578b5c75904b9df8a6beb3db7f4c6e2c9c 100644 (file)
@@ -1236,6 +1236,9 @@ MODULE_PARM_DESC(wd_disable,
                "Disable stuck queue watchdog timer 0=system default, "
                "1=disable, 2=enable (default: 0)");
 
+module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, S_IRUGO);
+MODULE_PARM_DESC(nvm_file, "NVM file name");
+
 /*
  * set bt_coex_active to true, uCode will do kill/defer
  * every time the priority line is asserted (BT is sending signals on the
index 7d145091630880ceafe9b4626f37df95e976e4e2..429337a2b9a15189c0c0fefc94389a884e3b43f3 100644 (file)
@@ -62,8 +62,7 @@
 
 #ifndef __iwl_drv_h__
 #define __iwl_drv_h__
-
-#include <linux/module.h>
+#include <linux/export.h>
 
 /* for all modules */
 #define DRV_NAME        "iwlwifi"
index 600c9fdd7f710fc33091971f0162e20951534e29..4c887f3659089ea3451bc9cc4f2a8dc44c764601 100644 (file)
@@ -732,17 +732,16 @@ int iwl_init_sband_channels(struct iwl_nvm_data *data,
 void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
                          struct iwl_nvm_data *data,
                          struct ieee80211_sta_ht_cap *ht_info,
-                         enum ieee80211_band band)
+                         enum ieee80211_band band,
+                         u8 tx_chains, u8 rx_chains)
 {
        int max_bit_rate = 0;
-       u8 rx_chains;
-       u8 tx_chains;
 
-       tx_chains = hweight8(data->valid_tx_ant);
+       tx_chains = hweight8(tx_chains);
        if (cfg->rx_with_siso_diversity)
                rx_chains = 1;
        else
-               rx_chains = hweight8(data->valid_rx_ant);
+               rx_chains = hweight8(rx_chains);
 
        if (!(data->sku_cap_11n_enable) || !cfg->ht_params) {
                ht_info->ht_supported = false;
@@ -806,7 +805,8 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
        sband->n_bitrates = N_RATES_24;
        n_used += iwl_init_sband_channels(data, sband, n_channels,
                                          IEEE80211_BAND_2GHZ);
-       iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ);
+       iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ,
+                            data->valid_tx_ant, data->valid_rx_ant);
 
        sband = &data->bands[IEEE80211_BAND_5GHZ];
        sband->band = IEEE80211_BAND_5GHZ;
@@ -814,7 +814,8 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
        sband->n_bitrates = N_RATES_52;
        n_used += iwl_init_sband_channels(data, sband, n_channels,
                                          IEEE80211_BAND_5GHZ);
-       iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ);
+       iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ,
+                            data->valid_tx_ant, data->valid_rx_ant);
 
        if (n_channels != n_used)
                IWL_ERR_DEV(dev, "EEPROM: used only %d of %d channels\n",
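
iwl_init_ht_hw_capab() now takes the TX/RX antenna bitmaps as explicit arguments instead of reading them from the parsed data, but still reduces each bitmap to a chain count with hweight8(). A user-space stand-in for that bit-count step (the 0x05 antenna mask is an arbitrary example, not a real device value):

#include <stdint.h>
#include <stdio.h>

/* User-space stand-in for the kernel's hweight8(): count the set bits. */
static unsigned int hweight8_sketch(uint8_t v)
{
	unsigned int n = 0;

	while (v) {
		n += v & 1;
		v >>= 1;
	}
	return n;
}

int main(void)
{
	uint8_t valid_tx_ant = 0x05;	/* hypothetical: two antennas enabled */

	printf("tx chains: %u\n", hweight8_sketch(valid_tx_ant));
	return 0;
}
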
index 37f115390b1923f18b9e8fb990bf6a326c55ceed..d73304a23ec203dbdf1df8cc65e7092b2d5aaa37 100644 (file)
@@ -133,6 +133,7 @@ int iwl_init_sband_channels(struct iwl_nvm_data *data,
 void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
                          struct iwl_nvm_data *data,
                          struct ieee80211_sta_ht_cap *ht_info,
-                         enum ieee80211_band band);
+                         enum ieee80211_band band,
+                         u8 tx_chains, u8 rx_chains);
 
 #endif /* __iwl_eeprom_parse_h__ */
index c4c446d41eb081bf0f7eb37034a3137faf5a281e..f844d5c748c09ef49418cf6c7999e0146a188a77 100644 (file)
@@ -106,11 +106,14 @@ enum iwl_ucode_type {
 
 /*
  * enumeration of ucode section.
- * This enumeration is used for legacy tlv style (before 16.0 uCode).
+ * This enumeration is used directly for older firmware (before 16.0).
+ * For new firmware, there can be up to 4 sections (see below) but the
+ * first one packaged into the firmware file is the DATA section and
+ * some debugging code accesses that.
  */
 enum iwl_ucode_sec {
-       IWL_UCODE_SECTION_INST,
        IWL_UCODE_SECTION_DATA,
+       IWL_UCODE_SECTION_INST,
 };
 /*
  * For 16.0 uCode and above, there is no differentiation between sections,
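
Reordering the enum makes IWL_UCODE_SECTION_DATA equal to 0, matching the updated comment that newer firmware images package the DATA section first and that some debugging code indexes it directly. A toy illustration of indexing firmware sections by such an enum (the names and sizes here are invented):

#include <stdio.h>

enum ucode_sec {
	UCODE_SECTION_DATA,	/* 0: first section packaged in the image */
	UCODE_SECTION_INST,	/* 1 */
};

struct fw_sec {
	const char *name;
	unsigned int len;
};

int main(void)
{
	/* Invented image layout: DATA first, then INST. */
	struct fw_sec sec[] = {
		[UCODE_SECTION_DATA] = { "data", 0x4000 },
		[UCODE_SECTION_INST] = { "inst", 0x8000 },
	};

	printf("%s section, %u bytes\n",
	       sec[UCODE_SECTION_DATA].name, sec[UCODE_SECTION_DATA].len);
	return 0;
}
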
index d6f6c37c09fd3a395406b67f2e98300b4926d53d..d4ad505b0a4bb9668cdc2d788f46986194d592ef 100644 (file)
@@ -115,10 +115,13 @@ struct iwl_mod_params {
        int led_mode;
        bool power_save;
        int power_level;
+#ifdef CONFIG_IWLWIFI_DEBUG
        u32 debug_level;
+#endif
        int ant_coupling;
        bool bt_ch_announce;
        bool auto_agg;
+       char *nvm_file;
 };
 
 #endif /* #__iwl_modparams_h__ */
index 6199a0a597a6f5c60df627960d616e674903a8ae..acd2665afb8cd9c942cee42527b62acc923a6749 100644 (file)
@@ -89,6 +89,7 @@ enum nvm_sku_bits {
        NVM_SKU_CAP_BAND_24GHZ  = BIT(0),
        NVM_SKU_CAP_BAND_52GHZ  = BIT(1),
        NVM_SKU_CAP_11N_ENABLE  = BIT(2),
+       NVM_SKU_CAP_11AC_ENABLE = BIT(3),
 };
 
 /* radio config bits (actual values from NVM definition) */
@@ -258,8 +259,6 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
                                  struct iwl_nvm_data *data,
                                  struct ieee80211_sta_vht_cap *vht_cap)
 {
-       /* For now, assume new devices with NVM are VHT capable */
-
        vht_cap->vht_supported = true;
 
        vht_cap->cap = IEEE80211_VHT_CAP_SHORT_GI_80 |
@@ -292,7 +291,8 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
 }
 
 static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
-                           struct iwl_nvm_data *data, const __le16 *nvm_sw)
+                           struct iwl_nvm_data *data, const __le16 *nvm_sw,
+                           bool enable_vht, u8 tx_chains, u8 rx_chains)
 {
        int n_channels = iwl_init_channel_map(dev, cfg, data,
                        &nvm_sw[NVM_CHANNELS]);
@@ -305,7 +305,8 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
        sband->n_bitrates = N_RATES_24;
        n_used += iwl_init_sband_channels(data, sband, n_channels,
                                          IEEE80211_BAND_2GHZ);
-       iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ);
+       iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ,
+                            tx_chains, rx_chains);
 
        sband = &data->bands[IEEE80211_BAND_5GHZ];
        sband->band = IEEE80211_BAND_5GHZ;
@@ -313,8 +314,10 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
        sband->n_bitrates = N_RATES_52;
        n_used += iwl_init_sband_channels(data, sband, n_channels,
                                          IEEE80211_BAND_5GHZ);
-       iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ);
-       iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap);
+       iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ,
+                            tx_chains, rx_chains);
+       if (enable_vht)
+               iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap);
 
        if (n_channels != n_used)
                IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n",
@@ -324,7 +327,7 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
 struct iwl_nvm_data *
 iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
                   const __le16 *nvm_hw, const __le16 *nvm_sw,
-                  const __le16 *nvm_calib)
+                  const __le16 *nvm_calib, u8 tx_chains, u8 rx_chains)
 {
        struct iwl_nvm_data *data;
        u8 hw_addr[ETH_ALEN];
@@ -380,7 +383,8 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
        data->hw_addr[4] = hw_addr[5];
        data->hw_addr[5] = hw_addr[4];
 
-       iwl_init_sbands(dev, cfg, data, nvm_sw);
+       iwl_init_sbands(dev, cfg, data, nvm_sw, sku & NVM_SKU_CAP_11AC_ENABLE,
+                       tx_chains, rx_chains);
 
        data->calib_version = 255;   /* TODO:
                                        this value will prevent some checks from
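
The parser now derives VHT support from the new NVM_SKU_CAP_11AC_ENABLE bit instead of assuming every NVM-based device is VHT capable, and only fills in vht_cap when that bit is set. A stand-alone sketch of testing the SKU bits listed above (the sample SKU word is made up):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n)	(1u << (n))

/* SKU capability bits as listed in the hunk above. */
#define NVM_SKU_CAP_BAND_24GHZ	BIT(0)
#define NVM_SKU_CAP_BAND_52GHZ	BIT(1)
#define NVM_SKU_CAP_11N_ENABLE	BIT(2)
#define NVM_SKU_CAP_11AC_ENABLE	BIT(3)

int main(void)
{
	uint16_t sku = 0x000f;	/* made-up SKU word read from the NVM */
	bool enable_vht = sku & NVM_SKU_CAP_11AC_ENABLE;

	printf("2.4GHz:%d 5GHz:%d 11n:%d 11ac(VHT):%d\n",
	       !!(sku & NVM_SKU_CAP_BAND_24GHZ),
	       !!(sku & NVM_SKU_CAP_BAND_52GHZ),
	       !!(sku & NVM_SKU_CAP_11N_ENABLE),
	       enable_vht);
	return 0;
}
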
index e57fb989661e0617487cd314b351f3915975e424..3325059c52d49d97c289fab319946fddd0a40c57 100644 (file)
@@ -75,6 +75,6 @@
 struct iwl_nvm_data *
 iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
                   const __le16 *nvm_hw, const __le16 *nvm_sw,
-                  const __le16 *nvm_calib);
+                  const __le16 *nvm_calib, u8 tx_chains, u8 rx_chains);
 
 #endif /* __iwl_nvm_parse_h__ */
index 25745daa0d5da4142abef88f506b8bee614d8905..1a405ae6a9c59a00f95936f25bb2602abf1751d7 100644 (file)
@@ -92,20 +92,16 @@ struct iwl_phy_db_entry {
 struct iwl_phy_db {
        struct iwl_phy_db_entry cfg;
        struct iwl_phy_db_entry calib_nch;
-       struct iwl_phy_db_entry calib_ch;
        struct iwl_phy_db_entry calib_ch_group_papd[IWL_NUM_PAPD_CH_GROUPS];
        struct iwl_phy_db_entry calib_ch_group_txp[IWL_NUM_TXP_CH_GROUPS];
 
-       u32 channel_num;
-       u32 channel_size;
-
        struct iwl_trans *trans;
 };
 
 enum iwl_phy_db_section_type {
        IWL_PHY_DB_CFG = 1,
        IWL_PHY_DB_CALIB_NCH,
-       IWL_PHY_DB_CALIB_CH,
+       IWL_PHY_DB_UNUSED,
        IWL_PHY_DB_CALIB_CHG_PAPD,
        IWL_PHY_DB_CALIB_CHG_TXP,
        IWL_PHY_DB_MAX
@@ -169,8 +165,6 @@ iwl_phy_db_get_section(struct iwl_phy_db *phy_db,
                return &phy_db->cfg;
        case IWL_PHY_DB_CALIB_NCH:
                return &phy_db->calib_nch;
-       case IWL_PHY_DB_CALIB_CH:
-               return &phy_db->calib_ch;
        case IWL_PHY_DB_CALIB_CHG_PAPD:
                if (chg_id >= IWL_NUM_PAPD_CH_GROUPS)
                        return NULL;
@@ -208,7 +202,6 @@ void iwl_phy_db_free(struct iwl_phy_db *phy_db)
 
        iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CFG, 0);
        iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_NCH, 0);
-       iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CH, 0);
        for (i = 0; i < IWL_NUM_PAPD_CH_GROUPS; i++)
                iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, i);
        for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++)
@@ -248,13 +241,6 @@ int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt,
 
        entry->size = size;
 
-       if (type == IWL_PHY_DB_CALIB_CH) {
-               phy_db->channel_num =
-                       le32_to_cpup((__le32 *)phy_db_notif->data);
-               phy_db->channel_size =
-                       (size - CHANNEL_NUM_SIZE) / phy_db->channel_num;
-       }
-
        IWL_DEBUG_INFO(phy_db->trans,
                       "%s(%d): [PHYDB]SET: Type %d , Size: %d\n",
                       __func__, __LINE__, type, size);
@@ -328,10 +314,7 @@ int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
                                u32 type, u8 **data, u16 *size, u16 ch_id)
 {
        struct iwl_phy_db_entry *entry;
-       u32 channel_num;
-       u32 channel_size;
        u16 ch_group_id = 0;
-       u16 index;
 
        if (!phy_db)
                return -EINVAL;
@@ -346,21 +329,8 @@ int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
        if (!entry)
                return -EINVAL;
 
-       if (type == IWL_PHY_DB_CALIB_CH) {
-               index = ch_id_to_ch_index(ch_id);
-               channel_num = phy_db->channel_num;
-               channel_size = phy_db->channel_size;
-               if (index >= channel_num) {
-                       IWL_ERR(phy_db->trans, "Wrong channel number %d\n",
-                               ch_id);
-                       return -EINVAL;
-               }
-               *data = entry->data + CHANNEL_NUM_SIZE + index * channel_size;
-               *size = channel_size;
-       } else {
-               *data = entry->data;
-               *size = entry->size;
-       }
+       *data = entry->data;
+       *size = entry->size;
 
        IWL_DEBUG_INFO(phy_db->trans,
                       "%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
@@ -413,6 +383,9 @@ static int iwl_phy_db_send_all_channel_groups(
                if (!entry)
                        return -EINVAL;
 
+               if (WARN_ON_ONCE(!entry->size))
+                       continue;
+
                /* Send the requested PHY DB section */
                err = iwl_send_phy_db_cmd(phy_db,
                                          type,
index 386f2a7c87cb731b39aec7dd03c53f03e5e17725..ff8cc75c189d4d842abf8611fb8c5e7c7a63bf38 100644 (file)
 /* Device system time */
 #define DEVICE_SYSTEM_TIME_REG 0xA0206C
 
+/*****************************************************************************
+ *                        7000/3000 series SHR DTS addresses                 *
+ *****************************************************************************/
+
+#define SHR_MISC_WFM_DTS_EN    (0x00a10024)
+#define DTSC_CFG_MODE          (0x00a10604)
+#define DTSC_VREF_AVG          (0x00a10648)
+#define DTSC_VREF5_AVG         (0x00a1064c)
+#define DTSC_CFG_MODE_PERIODIC (0x2)
+#define DTSC_PTAT_AVG          (0x00a10650)
+
+
 /**
  * Tx Scheduler
  *
index 7a13790b5bfe5e20feb8db54edd2fc4ef738187d..be4b2ac3dbbf008dbe872d11359bbc5bcc55d2a5 100644 (file)
@@ -189,7 +189,8 @@ enum CMD_MODE {
        CMD_SYNC                = 0,
        CMD_ASYNC               = BIT(0),
        CMD_WANT_SKB            = BIT(1),
-       CMD_ON_DEMAND           = BIT(2),
+       CMD_SEND_IN_RFKILL      = BIT(2),
+       CMD_ON_DEMAND           = BIT(3),
 };
 
 #define DEF_CMD_PAYLOAD_SIZE 320
@@ -427,8 +428,9 @@ struct iwl_trans_ops {
        void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
        void (*stop_device)(struct iwl_trans *trans);
 
-       void (*d3_suspend)(struct iwl_trans *trans);
-       int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status);
+       void (*d3_suspend)(struct iwl_trans *trans, bool test);
+       int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
+                        bool test);
 
        int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
 
@@ -455,7 +457,7 @@ struct iwl_trans_ops {
        int (*read_mem)(struct iwl_trans *trans, u32 addr,
                        void *buf, int dwords);
        int (*write_mem)(struct iwl_trans *trans, u32 addr,
-                        void *buf, int dwords);
+                        const void *buf, int dwords);
        void (*configure)(struct iwl_trans *trans,
                          const struct iwl_trans_config *trans_cfg);
        void (*set_pmi)(struct iwl_trans *trans, bool state);
@@ -587,17 +589,18 @@ static inline void iwl_trans_stop_device(struct iwl_trans *trans)
        trans->state = IWL_TRANS_NO_FW;
 }
 
-static inline void iwl_trans_d3_suspend(struct iwl_trans *trans)
+static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test)
 {
        might_sleep();
-       trans->ops->d3_suspend(trans);
+       trans->ops->d3_suspend(trans, test);
 }
 
 static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
-                                     enum iwl_d3_status *status)
+                                     enum iwl_d3_status *status,
+                                     bool test)
 {
        might_sleep();
-       return trans->ops->d3_resume(trans, status);
+       return trans->ops->d3_resume(trans, status, test);
 }
 
 static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
@@ -761,7 +764,7 @@ static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
 }
 
 static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
-                                     void *buf, int dwords)
+                                     const void *buf, int dwords)
 {
        return trans->ops->write_mem(trans, addr, buf, dwords);
 }
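
The transport API threads a new `test` flag through d3_suspend()/d3_resume(); the static inline wrappers shown above simply forward it to the ops vtable. A minimal user-space sketch of that wrapper-over-ops pattern, with invented type and function names rather than the driver's:

#include <stdbool.h>
#include <stdio.h>

struct trans;

struct trans_ops {
	void (*d3_suspend)(struct trans *trans, bool test);
};

struct trans {
	const struct trans_ops *ops;
};

/* Thin inline wrapper, mirroring the shape of iwl_trans_d3_suspend() above. */
static inline void trans_d3_suspend(struct trans *trans, bool test)
{
	trans->ops->d3_suspend(trans, test);
}

/* A pretend transport backend. */
static void backend_d3_suspend(struct trans *trans, bool test)
{
	(void)trans;
	printf("entering D3, test mode: %s\n", test ? "yes" : "no");
}

static const struct trans_ops backend_ops = {
	.d3_suspend = backend_d3_suspend,
};

int main(void)
{
	struct trans trans = { .ops = &backend_ops };

	trans_d3_suspend(&trans, true);
	return 0;
}
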
index 2acc44b40986868437a92be40adcb30ecaa1a6fd..ff856e543ae85689549fbd1fcae64320a2ca67fa 100644 (file)
@@ -3,7 +3,7 @@ iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
 iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o
 iwlmvm-y += scan.o time-event.o rs.o
 iwlmvm-y += power.o bt-coex.o
-iwlmvm-y += led.o
+iwlmvm-y += led.o tt.o
 iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
 iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
 
index 810bfa5f6de0a906058422be4a7d26d6329a5698..9a4d94a1f90d66deb8003e455c4499a86ccfc50d 100644 (file)
@@ -174,7 +174,7 @@ static const __le32 iwl_tight_lookup[BT_COEX_LUT_SIZE] = {
 static const __le32 iwl_loose_lookup[BT_COEX_LUT_SIZE] = {
        cpu_to_le32(0xaaaaaaaa),
        cpu_to_le32(0xaaaaaaaa),
-       cpu_to_le32(0xaeaaaaaa),
+       cpu_to_le32(0xaaaaaaaa),
        cpu_to_le32(0xaaaaaaaa),
        cpu_to_le32(0xcc00ff28),
        cpu_to_le32(0x0000aaaa),
@@ -351,6 +351,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
        enum ieee80211_band band;
        int ave_rssi;
 
+       lockdep_assert_held(&mvm->mutex);
        if (vif->type != NL80211_IFTYPE_STATION)
                return;
 
@@ -365,7 +366,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
        smps_mode = IEEE80211_SMPS_AUTOMATIC;
 
        if (band != IEEE80211_BAND_2GHZ) {
-               ieee80211_request_smps(vif, smps_mode);
+               iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
+                                   smps_mode);
                return;
        }
 
@@ -380,7 +382,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
                       mvmvif->id,  data->notif->bt_status,
                       data->notif->bt_traffic_load, smps_mode);
 
-       ieee80211_request_smps(vif, smps_mode);
+       iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, smps_mode);
 
        /* don't reduce the Tx power if in loose scheme */
        if (is_loose_coex())
index 16bbdcc8627ae568db4734cf5c61458397162b1e..8c49db02c9c15969149687d576b891f3f5a9cd76 100644 (file)
@@ -63,6 +63,7 @@
 
 #include <linux/etherdevice.h>
 #include <linux/ip.h>
+#include <linux/fs.h>
 #include <net/cfg80211.h>
 #include <net/ipv6.h>
 #include <net/tcp.h>
@@ -419,8 +420,7 @@ static __le16 pseudo_hdr_check(int len, __be32 saddr, __be32 daddr)
        return cpu_to_le16(be16_to_cpu((__force __be16)check));
 }
 
-static void iwl_mvm_build_tcp_packet(struct iwl_mvm *mvm,
-                                    struct ieee80211_vif *vif,
+static void iwl_mvm_build_tcp_packet(struct ieee80211_vif *vif,
                                     struct cfg80211_wowlan_tcp *tcp,
                                     void *_pkt, u8 *mask,
                                     __le16 *pseudo_hdr_csum,
@@ -566,21 +566,21 @@ static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm,
 
        /* SYN (TX) */
        iwl_mvm_build_tcp_packet(
-               mvm, vif, tcp, cfg->syn_tx.data, NULL,
+               vif, tcp, cfg->syn_tx.data, NULL,
                &cfg->syn_tx.info.tcp_pseudo_header_checksum,
                MVM_TCP_TX_SYN);
        cfg->syn_tx.info.tcp_payload_length = 0;
 
        /* SYN/ACK (RX) */
        iwl_mvm_build_tcp_packet(
-               mvm, vif, tcp, cfg->synack_rx.data, cfg->synack_rx.rx_mask,
+               vif, tcp, cfg->synack_rx.data, cfg->synack_rx.rx_mask,
                &cfg->synack_rx.info.tcp_pseudo_header_checksum,
                MVM_TCP_RX_SYNACK);
        cfg->synack_rx.info.tcp_payload_length = 0;
 
        /* KEEPALIVE/ACK (TX) */
        iwl_mvm_build_tcp_packet(
-               mvm, vif, tcp, cfg->keepalive_tx.data, NULL,
+               vif, tcp, cfg->keepalive_tx.data, NULL,
                &cfg->keepalive_tx.info.tcp_pseudo_header_checksum,
                MVM_TCP_TX_DATA);
        cfg->keepalive_tx.info.tcp_payload_length =
@@ -604,7 +604,7 @@ static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm,
 
        /* ACK (RX) */
        iwl_mvm_build_tcp_packet(
-               mvm, vif, tcp, cfg->keepalive_ack_rx.data,
+               vif, tcp, cfg->keepalive_ack_rx.data,
                cfg->keepalive_ack_rx.rx_mask,
                &cfg->keepalive_ack_rx.info.tcp_pseudo_header_checksum,
                MVM_TCP_RX_ACK);
@@ -612,7 +612,7 @@ static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm,
 
        /* WAKEUP (RX) */
        iwl_mvm_build_tcp_packet(
-               mvm, vif, tcp, cfg->wake_rx.data, cfg->wake_rx.rx_mask,
+               vif, tcp, cfg->wake_rx.data, cfg->wake_rx.rx_mask,
                &cfg->wake_rx.info.tcp_pseudo_header_checksum,
                MVM_TCP_RX_WAKE);
        cfg->wake_rx.info.tcp_payload_length =
@@ -620,7 +620,7 @@ static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm,
 
        /* FIN */
        iwl_mvm_build_tcp_packet(
-               mvm, vif, tcp, cfg->fin_tx.data, NULL,
+               vif, tcp, cfg->fin_tx.data, NULL,
                &cfg->fin_tx.info.tcp_pseudo_header_checksum,
                MVM_TCP_TX_FIN);
        cfg->fin_tx.info.tcp_payload_length = 0;
@@ -756,7 +756,9 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        return 0;
 }
 
-int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
+                            struct cfg80211_wowlan *wowlan,
+                            bool test)
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        struct iwl_d3_iter_data suspend_iter_data = {
@@ -769,7 +771,7 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
        struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
        struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
        struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
-       struct iwl_d3_manager_config d3_cfg_cmd = {
+       struct iwl_d3_manager_config d3_cfg_cmd_data = {
                /*
                 * Program the minimum sleep time to 10 seconds, as many
                 * platforms have issues processing a wakeup signal while
@@ -777,17 +779,30 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
                 */
                .min_sleep_time = cpu_to_le32(10 * 1000 * 1000),
        };
+       struct iwl_host_cmd d3_cfg_cmd = {
+               .id = D3_CONFIG_CMD,
+               .flags = CMD_SYNC | CMD_WANT_SKB,
+               .data[0] = &d3_cfg_cmd_data,
+               .len[0] = sizeof(d3_cfg_cmd_data),
+       };
        struct wowlan_key_data key_data = {
                .use_rsc_tsc = false,
                .tkip = &tkip_cmd,
                .use_tkip = false,
        };
        int ret, i;
+       int len __maybe_unused;
        u16 seq;
        u8 old_aux_sta_id, old_ap_sta_id = IWL_MVM_STATION_COUNT;
 
-       if (WARN_ON(!wowlan))
+       if (!wowlan) {
+               /*
+                * mac80211 shouldn't get here, but for D3 test
+                * it doesn't warrant a warning
+                */
+               WARN_ON(!test);
                return -EINVAL;
+       }
 
        key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
        if (!key_data.rsc_tsc)
@@ -1007,15 +1022,31 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
        if (ret)
                goto out;
 
+       ret = iwl_mvm_power_update_mode(mvm, vif);
+       if (ret)
+               goto out;
+
        /* must be last -- this switches firmware state */
-       ret = iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD, CMD_SYNC,
-                                  sizeof(d3_cfg_cmd), &d3_cfg_cmd);
+       ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
        if (ret)
                goto out;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       len = le32_to_cpu(d3_cfg_cmd.resp_pkt->len_n_flags) &
+               FH_RSCSR_FRAME_SIZE_MSK;
+       if (len >= sizeof(u32) * 2) {
+               mvm->d3_test_pme_ptr =
+                       le32_to_cpup((__le32 *)d3_cfg_cmd.resp_pkt->data);
+       } else if (test) {
+               /* in test mode we require the pointer */
+               ret = -EIO;
+               goto out;
+       }
+#endif
+       iwl_free_resp(&d3_cfg_cmd);
 
        clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
 
-       iwl_trans_d3_suspend(mvm->trans);
+       iwl_trans_d3_suspend(mvm->trans, test);
  out:
        mvm->aux_sta.sta_id = old_aux_sta_id;
        mvm_ap_sta->sta_id = old_ap_sta_id;
@@ -1030,6 +1061,11 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
        return ret;
 }
 
+int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+{
+       return __iwl_mvm_suspend(hw, wowlan, false);
+}
+
 static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
                                         struct ieee80211_vif *vif)
 {
@@ -1214,9 +1250,28 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
        iwl_free_resp(&cmd);
 }
 
-int iwl_mvm_resume(struct ieee80211_hw *hw)
+static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm)
+{
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       const struct fw_img *img = &mvm->fw->img[IWL_UCODE_WOWLAN];
+       u32 len = img->sec[IWL_UCODE_SECTION_DATA].len;
+       u32 offs = img->sec[IWL_UCODE_SECTION_DATA].offset;
+
+       if (!mvm->store_d3_resume_sram)
+               return;
+
+       if (!mvm->d3_resume_sram) {
+               mvm->d3_resume_sram = kzalloc(len, GFP_KERNEL);
+               if (!mvm->d3_resume_sram)
+                       return;
+       }
+
+       iwl_trans_read_mem_bytes(mvm->trans, offs, mvm->d3_resume_sram, len);
+#endif
+}
+
+static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
 {
-       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        struct iwl_d3_iter_data resume_iter_data = {
                .mvm = mvm,
        };
@@ -1236,7 +1291,7 @@ int iwl_mvm_resume(struct ieee80211_hw *hw)
 
        vif = resume_iter_data.vif;
 
-       ret = iwl_trans_d3_resume(mvm->trans, &d3_status);
+       ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test);
        if (ret)
                goto out_unlock;
 
@@ -1245,12 +1300,15 @@ int iwl_mvm_resume(struct ieee80211_hw *hw)
                goto out_unlock;
        }
 
+       /* query SRAM first in case we want event logging */
+       iwl_mvm_read_d3_sram(mvm);
+
        iwl_mvm_query_wakeup_reasons(mvm, vif);
 
  out_unlock:
        mutex_unlock(&mvm->mutex);
 
-       if (vif)
+       if (!test && vif)
                ieee80211_resume_disconnect(vif);
 
        /* return 1 to reconfigure the device */
@@ -1258,9 +1316,106 @@ int iwl_mvm_resume(struct ieee80211_hw *hw)
        return 1;
 }
 
+int iwl_mvm_resume(struct ieee80211_hw *hw)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+       return __iwl_mvm_resume(mvm, false);
+}
+
 void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled)
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 
        device_set_wakeup_enable(mvm->trans->dev, enabled);
 }
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
+{
+       struct iwl_mvm *mvm = inode->i_private;
+       int err;
+
+       if (mvm->d3_test_active)
+               return -EBUSY;
+
+       file->private_data = inode->i_private;
+
+       ieee80211_stop_queues(mvm->hw);
+       synchronize_net();
+
+       /* start pseudo D3 */
+       rtnl_lock();
+       err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true);
+       rtnl_unlock();
+       if (err > 0)
+               err = -EINVAL;
+       if (err) {
+               ieee80211_wake_queues(mvm->hw);
+               return err;
+       }
+       mvm->d3_test_active = true;
+       return 0;
+}
+
+static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
+                                   size_t count, loff_t *ppos)
+{
+       struct iwl_mvm *mvm = file->private_data;
+       u32 pme_asserted;
+
+       while (true) {
+               pme_asserted = iwl_trans_read_mem32(mvm->trans,
+                                                   mvm->d3_test_pme_ptr);
+               if (pme_asserted)
+                       break;
+               if (msleep_interruptible(100))
+                       break;
+       }
+
+       return 0;
+}
+
+static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac,
+                                             struct ieee80211_vif *vif)
+{
+       if (vif->type == NL80211_IFTYPE_STATION)
+               ieee80211_connection_loss(vif);
+}
+
+static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
+{
+       struct iwl_mvm *mvm = inode->i_private;
+       int remaining_time = 10;
+
+       mvm->d3_test_active = false;
+       __iwl_mvm_resume(mvm, true);
+       iwl_abort_notification_waits(&mvm->notif_wait);
+       ieee80211_restart_hw(mvm->hw);
+
+       /* wait for restart and disconnect all interfaces */
+       while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+              remaining_time > 0) {
+               remaining_time--;
+               msleep(1000);
+       }
+
+       if (remaining_time == 0)
+               IWL_ERR(mvm, "Timed out waiting for HW restart to finish!\n");
+
+       ieee80211_iterate_active_interfaces_atomic(
+               mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+               iwl_mvm_d3_test_disconn_work_iter, NULL);
+
+       ieee80211_wake_queues(mvm->hw);
+
+       return 0;
+}
+
+const struct file_operations iwl_dbgfs_d3_test_ops = {
+       .llseek = no_llseek,
+       .open = iwl_mvm_d3_test_open,
+       .read = iwl_mvm_d3_test_read,
+       .release = iwl_mvm_d3_test_release,
+};
+#endif
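
iwl_mvm_d3_test_read() blocks in a loop, re-reading the firmware-provided PME pointer every 100 ms until it becomes non-zero or the sleep is interrupted. A rough user-space approximation of that poll loop, with the device-memory read replaced by a fake that asserts after a few iterations:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Fake stand-in for reading the PME word from device memory. */
static uint32_t fake_read_pme(void)
{
	static int calls;

	return ++calls >= 5;	/* "asserts" after five polls */
}

int main(void)
{
	struct timespec tick = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };

	while (!fake_read_pme()) {
		if (nanosleep(&tick, NULL) != 0)
			break;	/* interrupted: bail out, like msleep_interruptible */
	}
	printf("PME asserted (or poll interrupted)\n");
	return 0;
}
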
index 2053dccefcd6875060a13b45224344614971d5cb..b7643c16201f67ca44779c3d040fef0184824f9b 100644 (file)
@@ -145,15 +145,18 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf,
        char *buf;
        u8 *ptr;
 
+       if (!mvm->ucode_loaded)
+               return -EINVAL;
+
        /* default is to dump the entire data segment */
        if (!mvm->dbgfs_sram_offset && !mvm->dbgfs_sram_len) {
-               mvm->dbgfs_sram_offset = 0x800000;
-               if (!mvm->ucode_loaded)
-                       return -EINVAL;
                img = &mvm->fw->img[mvm->cur_ucode];
-               mvm->dbgfs_sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
+               ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
+               len = img->sec[IWL_UCODE_SECTION_DATA].len;
+       } else {
+               ofs = mvm->dbgfs_sram_offset;
+               len = mvm->dbgfs_sram_len;
        }
-       len = mvm->dbgfs_sram_len;
 
        bufsz = len * 4 + 256;
        buf = kzalloc(bufsz, GFP_KERNEL);
@@ -167,12 +170,9 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf,
        }
 
        pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n", len);
-       pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
-                        mvm->dbgfs_sram_offset);
+       pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n", ofs);
 
-       iwl_trans_read_mem_bytes(mvm->trans,
-                                mvm->dbgfs_sram_offset,
-                                ptr, len);
+       iwl_trans_read_mem_bytes(mvm->trans, ofs, ptr, len);
        for (ofs = 0; ofs < len; ofs += 16) {
                pos += scnprintf(buf + pos, bufsz - pos, "0x%.4x ", ofs);
                hex_dump_to_buffer(ptr + ofs, 16, 16, 1, buf + pos,
@@ -300,6 +300,146 @@ static ssize_t iwl_dbgfs_power_down_d3_allow_write(struct file *file,
        return count;
 }
 
+static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
+                                struct ieee80211_vif *vif,
+                                enum iwl_dbgfs_pm_mask param, int val)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_dbgfs_pm *dbgfs_pm = &mvmvif->dbgfs_pm;
+
+       dbgfs_pm->mask |= param;
+
+       switch (param) {
+       case MVM_DEBUGFS_PM_KEEP_ALIVE: {
+               struct ieee80211_hw *hw = mvm->hw;
+               int dtimper = hw->conf.ps_dtim_period ?: 1;
+               int dtimper_msec = dtimper * vif->bss_conf.beacon_int;
+
+               IWL_DEBUG_POWER(mvm, "debugfs: set keep_alive= %d sec\n", val);
+               if (val * MSEC_PER_SEC < 3 * dtimper_msec) {
+                       IWL_WARN(mvm,
+                                "debugfs: keep alive period (%ld msec) is less than minimum required (%d msec)\n",
+                                val * MSEC_PER_SEC, 3 * dtimper_msec);
+               }
+               dbgfs_pm->keep_alive_seconds = val;
+               break;
+       }
+       case MVM_DEBUGFS_PM_SKIP_OVER_DTIM:
+               IWL_DEBUG_POWER(mvm, "skip_over_dtim %s\n",
+                               val ? "enabled" : "disabled");
+               dbgfs_pm->skip_over_dtim = val;
+               break;
+       case MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS:
+               IWL_DEBUG_POWER(mvm, "skip_dtim_periods=%d\n", val);
+               dbgfs_pm->skip_dtim_periods = val;
+               break;
+       case MVM_DEBUGFS_PM_RX_DATA_TIMEOUT:
+               IWL_DEBUG_POWER(mvm, "rx_data_timeout=%d\n", val);
+               dbgfs_pm->rx_data_timeout = val;
+               break;
+       case MVM_DEBUGFS_PM_TX_DATA_TIMEOUT:
+               IWL_DEBUG_POWER(mvm, "tx_data_timeout=%d\n", val);
+               dbgfs_pm->tx_data_timeout = val;
+               break;
+       case MVM_DEBUGFS_PM_DISABLE_POWER_OFF:
+               IWL_DEBUG_POWER(mvm, "disable_power_off=%d\n", val);
+               dbgfs_pm->disable_power_off = val;
+               break;
+       }
+}
+
+static ssize_t iwl_dbgfs_pm_params_write(struct file *file,
+                                        const char __user *user_buf,
+                                        size_t count, loff_t *ppos)
+{
+       struct ieee80211_vif *vif = file->private_data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->dbgfs_data;
+       enum iwl_dbgfs_pm_mask param;
+       char buf[32] = {};
+       int val;
+       int ret;
+
+       if (copy_from_user(buf, user_buf, sizeof(buf)))
+               return -EFAULT;
+
+       if (!strncmp("keep_alive=", buf, 11)) {
+               if (sscanf(buf + 11, "%d", &val) != 1)
+                       return -EINVAL;
+               param = MVM_DEBUGFS_PM_KEEP_ALIVE;
+       } else if (!strncmp("skip_over_dtim=", buf, 15)) {
+               if (sscanf(buf + 15, "%d", &val) != 1)
+                       return -EINVAL;
+               param = MVM_DEBUGFS_PM_SKIP_OVER_DTIM;
+       } else if (!strncmp("skip_dtim_periods=", buf, 18)) {
+               if (sscanf(buf + 18, "%d", &val) != 1)
+                       return -EINVAL;
+               param = MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS;
+       } else if (!strncmp("rx_data_timeout=", buf, 16)) {
+               if (sscanf(buf + 16, "%d", &val) != 1)
+                       return -EINVAL;
+               param = MVM_DEBUGFS_PM_RX_DATA_TIMEOUT;
+       } else if (!strncmp("tx_data_timeout=", buf, 16)) {
+               if (sscanf(buf + 16, "%d", &val) != 1)
+                       return -EINVAL;
+               param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT;
+       } else if (!strncmp("disable_power_off=", buf, 18)) {
+               if (sscanf(buf + 18, "%d", &val) != 1)
+                       return -EINVAL;
+               param = MVM_DEBUGFS_PM_DISABLE_POWER_OFF;
+       } else {
+               return -EINVAL;
+       }
+
+       mutex_lock(&mvm->mutex);
+       iwl_dbgfs_update_pm(mvm, vif, param, val);
+       ret = iwl_mvm_power_update_mode(mvm, vif);
+       mutex_unlock(&mvm->mutex);
+
+       return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
+                                       char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct ieee80211_vif *vif = file->private_data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->dbgfs_data;
+       struct iwl_powertable_cmd cmd = {};
+       char buf[256];
+       int bufsz = sizeof(buf);
+       int pos = 0;
+
+       iwl_mvm_power_build_cmd(mvm, vif, &cmd);
+
+       pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
+                        (cmd.flags &
+                        cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
+                        0 : 1);
+       pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
+                        le32_to_cpu(cmd.skip_dtim_periods));
+       pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
+                        iwlmvm_mod_params.power_scheme);
+       pos += scnprintf(buf+pos, bufsz-pos, "flags = %d\n",
+                        le16_to_cpu(cmd.flags));
+       pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n",
+                        cmd.keep_alive_seconds);
+
+       if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
+               pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n",
+                                (cmd.flags &
+                                cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ?
+                                1 : 0);
+               pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n",
+                                le32_to_cpu(cmd.rx_data_timeout));
+               pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n",
+                                le32_to_cpu(cmd.tx_data_timeout));
+       }
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
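
The new pm_params write handler accepts one "key=value" line per write and dispatches on the key prefix before parsing the value with sscanf(). A small stand-alone example of that prefix-match-then-parse step (parse_param() is an invented helper, not driver code):

#include <stdio.h>
#include <string.h>

/* parse_param() shows the prefix-then-sscanf idea used by the handler above. */
static int parse_param(const char *buf, const char *key, int *val)
{
	size_t klen = strlen(key);

	if (strncmp(buf, key, klen) != 0)
		return -1;
	if (sscanf(buf + klen, "%d", val) != 1)
		return -1;
	return 0;
}

int main(void)
{
	int val;

	if (parse_param("keep_alive=25", "keep_alive=", &val) == 0)
		printf("keep_alive = %d\n", val);
	return 0;
}
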
 static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
@@ -481,6 +621,255 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
        return count;
 }
 
+static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
+                               enum iwl_dbgfs_bf_mask param, int value)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf;
+
+       dbgfs_bf->mask |= param;
+
+       switch (param) {
+       case MVM_DEBUGFS_BF_ENERGY_DELTA:
+               dbgfs_bf->bf_energy_delta = value;
+               break;
+       case MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA:
+               dbgfs_bf->bf_roaming_energy_delta = value;
+               break;
+       case MVM_DEBUGFS_BF_ROAMING_STATE:
+               dbgfs_bf->bf_roaming_state = value;
+               break;
+       case MVM_DEBUGFS_BF_TEMPERATURE_DELTA:
+               dbgfs_bf->bf_temperature_delta = value;
+               break;
+       case MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER:
+               dbgfs_bf->bf_enable_beacon_filter = value;
+               break;
+       case MVM_DEBUGFS_BF_DEBUG_FLAG:
+               dbgfs_bf->bf_debug_flag = value;
+               break;
+       case MVM_DEBUGFS_BF_ESCAPE_TIMER:
+               dbgfs_bf->bf_escape_timer = value;
+               break;
+       case MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT:
+               dbgfs_bf->ba_enable_beacon_abort = value;
+               break;
+       case MVM_DEBUGFS_BA_ESCAPE_TIMER:
+               dbgfs_bf->ba_escape_timer = value;
+               break;
+       }
+}
+
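+/*
+ * Parse a single "<name>=<value>" assignment written to the bf_params
+ * debugfs file (e.g. "bf_energy_delta=5"), range-check the value and push
+ * the updated beacon filter configuration to the firmware.
+ */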
+static ssize_t iwl_dbgfs_bf_params_write(struct file *file,
+                                        const char __user *user_buf,
+                                        size_t count, loff_t *ppos)
+{
+       struct ieee80211_vif *vif = file->private_data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->dbgfs_data;
+       enum iwl_dbgfs_bf_mask param;
+       char buf[256];
+       int buf_size;
+       int value;
+       int ret = 0;
+
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) - 1);
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+
+       if (!strncmp("bf_energy_delta=", buf, 16)) {
+               if (sscanf(buf+16, "%d", &value) != 1)
+                       return -EINVAL;
+               if (value < IWL_BF_ENERGY_DELTA_MIN ||
+                   value > IWL_BF_ENERGY_DELTA_MAX)
+                       return -EINVAL;
+               param = MVM_DEBUGFS_BF_ENERGY_DELTA;
+       } else if (!strncmp("bf_roaming_energy_delta=", buf, 24)) {
+               if (sscanf(buf+24, "%d", &value) != 1)
+                       return -EINVAL;
+               if (value < IWL_BF_ROAMING_ENERGY_DELTA_MIN ||
+                   value > IWL_BF_ROAMING_ENERGY_DELTA_MAX)
+                       return -EINVAL;
+               param = MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA;
+       } else if (!strncmp("bf_roaming_state=", buf, 17)) {
+               if (sscanf(buf+17, "%d", &value) != 1)
+                       return -EINVAL;
+               if (value < IWL_BF_ROAMING_STATE_MIN ||
+                   value > IWL_BF_ROAMING_STATE_MAX)
+                       return -EINVAL;
+               param = MVM_DEBUGFS_BF_ROAMING_STATE;
+       } else if (!strncmp("bf_temperature_delta=", buf, 21)) {
+               if (sscanf(buf+21, "%d", &value) != 1)
+                       return -EINVAL;
+               if (value < IWL_BF_TEMPERATURE_DELTA_MIN ||
+                   value > IWL_BF_TEMPERATURE_DELTA_MAX)
+                       return -EINVAL;
+               param = MVM_DEBUGFS_BF_TEMPERATURE_DELTA;
+       } else if (!strncmp("bf_enable_beacon_filter=", buf, 24)) {
+               if (sscanf(buf+24, "%d", &value) != 1)
+                       return -EINVAL;
+               if (value < 0 || value > 1)
+                       return -EINVAL;
+               param = MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER;
+       } else if (!strncmp("bf_debug_flag=", buf, 14)) {
+               if (sscanf(buf+14, "%d", &value) != 1)
+                       return -EINVAL;
+               if (value < 0 || value > 1)
+                       return -EINVAL;
+               param = MVM_DEBUGFS_BF_DEBUG_FLAG;
+       } else if (!strncmp("bf_escape_timer=", buf, 16)) {
+               if (sscanf(buf+16, "%d", &value) != 1)
+                       return -EINVAL;
+               if (value < IWL_BF_ESCAPE_TIMER_MIN ||
+                   value > IWL_BF_ESCAPE_TIMER_MAX)
+                       return -EINVAL;
+               param = MVM_DEBUGFS_BF_ESCAPE_TIMER;
+       } else if (!strncmp("ba_escape_timer=", buf, 16)) {
+               if (sscanf(buf+16, "%d", &value) != 1)
+                       return -EINVAL;
+               if (value < IWL_BA_ESCAPE_TIMER_MIN ||
+                   value > IWL_BA_ESCAPE_TIMER_MAX)
+                       return -EINVAL;
+               param = MVM_DEBUGFS_BA_ESCAPE_TIMER;
+       } else if (!strncmp("ba_enable_beacon_abort=", buf, 23)) {
+               if (sscanf(buf+23, "%d", &value) != 1)
+                       return -EINVAL;
+               if (value < 0 || value > 1)
+                       return -EINVAL;
+               param = MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT;
+       } else {
+               return -EINVAL;
+       }
+
+       mutex_lock(&mvm->mutex);
+       iwl_dbgfs_update_bf(vif, param, value);
+       if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value) {
+               ret = iwl_mvm_disable_beacon_filter(mvm, vif);
+       } else {
+               if (mvmvif->bf_enabled)
+                       ret = iwl_mvm_enable_beacon_filter(mvm, vif);
+               else
+                       ret = iwl_mvm_disable_beacon_filter(mvm, vif);
+       }
+       mutex_unlock(&mvm->mutex);
+
+       return ret ?: count;
+}
+
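+/*
+ * Report the beacon filter parameters currently in effect: the compile-time
+ * defaults with any debugfs overrides applied on top.
+ */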
+static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
+                                       char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct ieee80211_vif *vif = file->private_data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       char buf[256];
+       int pos = 0;
+       const size_t bufsz = sizeof(buf);
+       struct iwl_beacon_filter_cmd cmd = {
+               .bf_energy_delta = IWL_BF_ENERGY_DELTA_DEFAULT,
+               .bf_roaming_energy_delta = IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT,
+               .bf_roaming_state = IWL_BF_ROAMING_STATE_DEFAULT,
+               .bf_temperature_delta = IWL_BF_TEMPERATURE_DELTA_DEFAULT,
+               .bf_enable_beacon_filter = IWL_BF_ENABLE_BEACON_FILTER_DEFAULT,
+               .bf_debug_flag = IWL_BF_DEBUG_FLAG_DEFAULT,
+               .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER_DEFAULT),
+               .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_DEFAULT),
+               .ba_enable_beacon_abort = IWL_BA_ENABLE_BEACON_ABORT_DEFAULT,
+       };
+
+       iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
+       if (mvmvif->bf_enabled)
+               cmd.bf_enable_beacon_filter = 1;
+       else
+               cmd.bf_enable_beacon_filter = 0;
+
+       pos += scnprintf(buf+pos, bufsz-pos, "bf_energy_delta = %d\n",
+                        cmd.bf_energy_delta);
+       pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_energy_delta = %d\n",
+                        cmd.bf_roaming_energy_delta);
+       pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_state = %d\n",
+                        cmd.bf_roaming_state);
+       pos += scnprintf(buf+pos, bufsz-pos, "bf_temperature_delta = %d\n",
+                        cmd.bf_temperature_delta);
+       pos += scnprintf(buf+pos, bufsz-pos, "bf_enable_beacon_filter = %d\n",
+                        cmd.bf_enable_beacon_filter);
+       pos += scnprintf(buf+pos, bufsz-pos, "bf_debug_flag = %d\n",
+                        cmd.bf_debug_flag);
+       pos += scnprintf(buf+pos, bufsz-pos, "bf_escape_timer = %d\n",
+                        cmd.bf_escape_timer);
+       pos += scnprintf(buf+pos, bufsz-pos, "ba_escape_timer = %d\n",
+                        cmd.ba_escape_timer);
+       pos += scnprintf(buf+pos, bufsz-pos, "ba_enable_beacon_abort = %d\n",
+                        cmd.ba_enable_beacon_abort);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+#ifdef CONFIG_PM_SLEEP
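+/* Control whether the WoWLAN data SRAM is stored on D3 resume (write 1/0) */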
+static ssize_t iwl_dbgfs_d3_sram_write(struct file *file,
+                                      const char __user *user_buf,
+                                      size_t count, loff_t *ppos)
+{
+       struct iwl_mvm *mvm = file->private_data;
+       char buf[8] = {};
+       int store;
+
+       if (copy_from_user(buf, user_buf, sizeof(buf)))
+               return -EFAULT;
+
+       if (sscanf(buf, "%d", &store) != 1)
+               return -EINVAL;
+
+       mvm->store_d3_resume_sram = store;
+
+       return count;
+}
+
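+/* Hex-dump the SRAM image stored on the last D3 resume, if one was captured */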
+static ssize_t iwl_dbgfs_d3_sram_read(struct file *file, char __user *user_buf,
+                                     size_t count, loff_t *ppos)
+{
+       struct iwl_mvm *mvm = file->private_data;
+       const struct fw_img *img;
+       int ofs, len, pos = 0;
+       size_t bufsz, ret;
+       char *buf;
+       u8 *ptr = mvm->d3_resume_sram;
+
+       img = &mvm->fw->img[IWL_UCODE_WOWLAN];
+       len = img->sec[IWL_UCODE_SECTION_DATA].len;
+
+       bufsz = len * 4 + 256;
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       pos += scnprintf(buf, bufsz, "D3 SRAM capture: %sabled\n",
+                        mvm->store_d3_resume_sram ? "en" : "dis");
+
+       if (ptr) {
+               for (ofs = 0; ofs < len; ofs += 16) {
+                       pos += scnprintf(buf + pos, bufsz - pos,
+                                        "0x%.4x ", ofs);
+                       hex_dump_to_buffer(ptr + ofs, 16, 16, 1, buf + pos,
+                                          bufsz - pos, false);
+                       pos += strlen(buf + pos);
+                       if (bufsz - pos > 0)
+                               buf[pos++] = '\n';
+               }
+       } else {
+               pos += scnprintf(buf + pos, bufsz - pos,
+                                "(no data captured)\n");
+       }
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+
+       kfree(buf);
+
+       return ret;
+}
+#endif
+
 #define MVM_DEBUGFS_READ_FILE_OPS(name)                                        \
 static const struct file_operations iwl_dbgfs_##name##_ops = { \
        .read = iwl_dbgfs_##name##_read,                                \
@@ -524,9 +913,14 @@ MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
 MVM_DEBUGFS_WRITE_FILE_OPS(power_down_allow);
 MVM_DEBUGFS_WRITE_FILE_OPS(power_down_d3_allow);
 MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart);
+#ifdef CONFIG_PM_SLEEP
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram);
+#endif
 
 /* Interface specific debugfs entries */
 MVM_DEBUGFS_READ_FILE_OPS(mac_params);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params);
 
 int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
 {
@@ -542,6 +936,10 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
        MVM_DEBUGFS_ADD_FILE(power_down_allow, mvm->debugfs_dir, S_IWUSR);
        MVM_DEBUGFS_ADD_FILE(power_down_d3_allow, mvm->debugfs_dir, S_IWUSR);
        MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
+#ifdef CONFIG_PM_SLEEP
+       MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
+       MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, S_IRUSR);
+#endif
 
        /*
         * Create a symlink with mac80211. It will be removed when mac80211
@@ -577,9 +975,19 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                return;
        }
 
+       if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
+           vif->type == NL80211_IFTYPE_STATION && !vif->p2p)
+               MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, S_IWUSR |
+                                        S_IRUSR);
+
        MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir,
                                 S_IRUSR);
 
+       if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
+           mvmvif == mvm->bf_allowed_vif)
+               MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir,
+                                        S_IRUSR | S_IWUSR);
+
        /*
         * Create symlink for convenience pointing to interface specific
         * debugfs entries for the driver. For example, under
index 51e015d1dfb297cfeb6169afcc2db2ae57dc6a8d..6f8b2c16ae171965253c0d7e0e149ab3ff0d52f6 100644 (file)
@@ -75,13 +75,15 @@ enum iwl_d3_wakeup_flags {
  * struct iwl_d3_manager_config - D3 manager configuration command
  * @min_sleep_time: minimum sleep time (in usec)
  * @wakeup_flags: wakeup flags, see &enum iwl_d3_wakeup_flags
+ * @wakeup_host_timer: force wakeup after this many seconds
  *
  * The structure is used for the D3_CONFIG_CMD command.
  */
 struct iwl_d3_manager_config {
        __le32 min_sleep_time;
        __le32 wakeup_flags;
-} __packed; /* D3_MANAGER_CONFIG_CMD_S_VER_3 */
+       __le32 wakeup_host_timer;
+} __packed; /* D3_MANAGER_CONFIG_CMD_S_VER_4 */
 
 
 /* TODO: OFFLOADS_QUERY_API_S_VER_1 */
index d68640ea41d42e3ea0612d1fa562de1b00ba50e6..98b1feb43d388f70bc4aadf0f4434cc714744d05 100644 (file)
 #define MAC_INDEX_MIN_DRIVER   0
 #define NUM_MAC_INDEX_DRIVER   MAC_INDEX_AUX
 
-#define AC_NUM 4 /* Number of access categories */
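+/* Access categories; AC_NUM is the number of categories */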
+enum iwl_ac {
+       AC_BK,
+       AC_BE,
+       AC_VI,
+       AC_VO,
+       AC_NUM,
+};
 
 /**
  * enum iwl_mac_protection_flags - MAC context flags
index 81fe45f46be7e97f1d346ada8c457d065b9a377e..d8e19290b0f327f27b29b7d2fc5d716ed781b4de 100644 (file)
@@ -101,20 +101,107 @@ enum iwl_power_flags {
  * @tx_data_timeout:    Minimum time (usec) from last Tx packet for AM to
  *                     PSM transition - legacy PM
  * @sleep_interval:    not in use
- * @keep_alive_beacons:        not in use
+ * @skip_dtim_periods: Number of DTIM periods to skip if Skip over DTIM flag
+ *                     is set. For example, if it is required to skip over
+ *                     one DTIM, this value needs to be set to 2 (DTIM periods).
  * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled.
  *                     Default: 80dbm
  */
 struct iwl_powertable_cmd {
-       /* PM_POWER_TABLE_CMD_API_S_VER_5 */
+       /* PM_POWER_TABLE_CMD_API_S_VER_6 */
        __le16 flags;
        u8 keep_alive_seconds;
        u8 debug_flags;
        __le32 rx_data_timeout;
        __le32 tx_data_timeout;
        __le32 sleep_interval[IWL_POWER_VEC_SIZE];
-       __le32 keep_alive_beacons;
+       __le32 skip_dtim_periods;
        __le32 lprx_rssi_threshold;
 } __packed;
 
+/**
+ * struct iwl_beacon_filter_cmd
+ * REPLY_BEACON_FILTERING_CMD = 0xd2 (command)
+ * @id_and_color: MAC context identifier
+ * @bf_energy_delta: Used for RSSI filtering, if in 'normal' state. Send beacon
+ *      to driver if delta in Energy values calculated for this and last
+ *      passed beacon is greater than this threshold. Zero value means that
+ *      the Energy change is ignored for beacon filtering, and beacon will
+ *      not be forced to be sent to driver regardless of this delta. Typical
+ *      energy delta 5dB.
+ * @bf_roaming_energy_delta: Used for RSSI filtering, if in 'roaming' state.
+ *      Send beacon to driver if delta in Energy values calculated for this
+ *      and last passed beacon is greater than this threshold. Zero value
+ *      means that the Energy change is ignored for beacon filtering while in
+ *      Roaming state, typical energy delta 1dB.
+ * @bf_roaming_state: Used for RSSI filtering. If absolute Energy values
+ *      calculated for current beacon is less than the threshold, use
+ *      Roaming Energy Delta Threshold, otherwise use normal Energy Delta
+ *      Threshold. Typical energy threshold is -72dBm.
+ * @bf_temperature_delta: Send beacon to driver if delta in temperature values
+ *      calculated for this and the last passed beacon is greater than this
+ *      threshold. Zero value means that the temperature change is ignored for
+ *      beacon filtering; beacons will not be forced to be sent to driver
+ *      regardless of whether its temperature has changed.
+ * @bf_enable_beacon_filter: 1, beacon filtering is enabled; 0, disabled.
+ * @bf_escape_timer: Send beacons to driver if no beacons were passed
+ *      for a specific period of time. Units: Beacons.
+ * @ba_escape_timer: Fully receive and parse beacon if no beacons were passed
+ *      for a longer period of time than this escape-timeout. Units: Beacons.
+ * @ba_enable_beacon_abort: 1, beacon abort is enabled; 0, disabled.
+ */
+struct iwl_beacon_filter_cmd {
+       u8 bf_energy_delta;
+       u8 bf_roaming_energy_delta;
+       u8 bf_roaming_state;
+       u8 bf_temperature_delta;
+       u8 bf_enable_beacon_filter;
+       u8 bf_debug_flag;
+       __le16 reserved1;
+       __le32 bf_escape_timer;
+       __le32 ba_escape_timer;
+       u8 ba_enable_beacon_abort;
+       u8 reserved2[3];
+} __packed;
+
+/* Beacon filtering and beacon abort */
+#define IWL_BF_ENERGY_DELTA_DEFAULT 5
+#define IWL_BF_ENERGY_DELTA_MAX 255
+#define IWL_BF_ENERGY_DELTA_MIN 0
+
+#define IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT 1
+#define IWL_BF_ROAMING_ENERGY_DELTA_MAX 255
+#define IWL_BF_ROAMING_ENERGY_DELTA_MIN 0
+
+#define IWL_BF_ROAMING_STATE_DEFAULT 72
+#define IWL_BF_ROAMING_STATE_MAX 255
+#define IWL_BF_ROAMING_STATE_MIN 0
+
+#define IWL_BF_TEMPERATURE_DELTA_DEFAULT 5
+#define IWL_BF_TEMPERATURE_DELTA_MAX 255
+#define IWL_BF_TEMPERATURE_DELTA_MIN 0
+
+#define IWL_BF_ENABLE_BEACON_FILTER_DEFAULT 1
+
+#define IWL_BF_DEBUG_FLAG_DEFAULT 0
+
+#define IWL_BF_ESCAPE_TIMER_DEFAULT 50
+#define IWL_BF_ESCAPE_TIMER_MAX 1024
+#define IWL_BF_ESCAPE_TIMER_MIN 0
+
+#define IWL_BA_ESCAPE_TIMER_DEFAULT 3
+#define IWL_BA_ESCAPE_TIMER_MAX 1024
+#define IWL_BA_ESCAPE_TIMER_MIN 0
+
+#define IWL_BA_ENABLE_BEACON_ABORT_DEFAULT 1
+
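+/* Initializer snippet filling a struct iwl_beacon_filter_cmd with defaults */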
+#define IWL_BF_CMD_CONFIG_DEFAULTS                                     \
+       .bf_energy_delta = IWL_BF_ENERGY_DELTA_DEFAULT,                 \
+       .bf_roaming_energy_delta = IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT, \
+       .bf_roaming_state = IWL_BF_ROAMING_STATE_DEFAULT,               \
+       .bf_temperature_delta = IWL_BF_TEMPERATURE_DELTA_DEFAULT,       \
+       .bf_debug_flag = IWL_BF_DEBUG_FLAG_DEFAULT,                     \
+       .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER_DEFAULT),    \
+       .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_DEFAULT)
+
 #endif
index 007a93b25bd79ca96134a60ebe5ec79e69be56fe..700cce731770ab800640a421b2c99b3d06b6fad1 100644 (file)
@@ -134,6 +134,7 @@ enum iwl_tx_flags {
 #define TX_CMD_SEC_WEP                 0x01
 #define TX_CMD_SEC_CCM                 0x02
 #define TX_CMD_SEC_TKIP                        0x03
+#define TX_CMD_SEC_MSK                 0x07
 #define TX_CMD_SEC_WEP_KEY_IDX_POS     6
 #define TX_CMD_SEC_WEP_KEY_IDX_MSK     0xc0
 #define TX_CMD_SEC_KEY128              0x08
@@ -227,10 +228,11 @@ struct iwl_tx_cmd {
        __le16 len;
        __le16 next_frame_len;
        __le32 tx_flags;
-       /* DRAM_SCRATCH_API_U_VER_1 */
-       u8 try_cnt;
-       u8 btkill_cnt;
-       __le16 reserved;
+       struct {
+               u8 try_cnt;
+               u8 btkill_cnt;
+               __le16 reserved;
+       } scratch; /* DRAM_SCRATCH_API_U_VER_1 */
        __le32 rate_n_flags;
        u8 sta_id;
        u8 sec_ctl;
index c6384555aab4de0172c7f1a7c5662cce776e76ed..cbfb3beae7838740ca7f72e54705b197ec4e6b49 100644 (file)
@@ -139,6 +139,9 @@ enum {
        /* Power */
        POWER_TABLE_CMD = 0x77,
 
+       /* Thermal Throttling */
+       REPLY_THERMAL_MNG_BACKOFF = 0x7e,
+
        /* Scanning */
        SCAN_REQUEST_CMD = 0x80,
        SCAN_ABORT_CMD = 0x81,
@@ -161,6 +164,8 @@ enum {
        CARD_STATE_CMD = 0xa0,
        CARD_STATE_NOTIFICATION = 0xa1,
 
+       MISSED_BEACONS_NOTIFICATION = 0xa2,
+
        REPLY_RX_PHY_CMD = 0xc0,
        REPLY_RX_MPDU_CMD = 0xc1,
        BA_NOTIF = 0xc5,
@@ -170,6 +175,8 @@ enum {
        BT_COEX_PROT_ENV = 0xcd,
        BT_PROFILE_NOTIFICATION = 0xce,
 
+       REPLY_BEACON_FILTERING_CMD = 0xd2,
+
        REPLY_DEBUG_CMD = 0xf0,
        DEBUG_LOG_MSG = 0xf7,
 
@@ -937,6 +944,24 @@ struct iwl_card_state_notif {
        __le32 flags;
 } __packed; /* CARD_STATE_NTFY_API_S_VER_1 */
 
+/**
+ * struct iwl_missed_beacons_notif - information on missed beacons
+ * ( MISSED_BEACONS_NOTIFICATION = 0xa2 )
+ * @mac_id: interface ID
+ * @consec_missed_beacons_since_last_rx: number of consecutive missed
+ *     beacons since last RX.
+ * @consec_missed_beacons: number of consecutive missed beacons
+ * @num_expected_beacons:
+ * @num_recvd_beacons:
+ */
+struct iwl_missed_beacons_notif {
+       __le32 mac_id;
+       __le32 consec_missed_beacons_since_last_rx;
+       __le32 consec_missed_beacons;
+       __le32 num_expected_beacons;
+       __le32 num_recvd_beacons;
+} __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */
+
 /**
  * struct iwl_set_calib_default_cmd - set default value for calibration.
  * ( SET_CALIB_DEFAULT_CMD = 0x8e )
@@ -975,4 +1000,212 @@ struct iwl_mcast_filter_cmd {
        u8 addr_list[0];
 } __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */
 
+struct mvm_statistics_dbg {
+       __le32 burst_check;
+       __le32 burst_count;
+       __le32 wait_for_silence_timeout_cnt;
+       __le32 reserved[3];
+} __packed; /* STATISTICS_DEBUG_API_S_VER_2 */
+
+struct mvm_statistics_div {
+       __le32 tx_on_a;
+       __le32 tx_on_b;
+       __le32 exec_time;
+       __le32 probe_time;
+       __le32 rssi_ant;
+       __le32 reserved2;
+} __packed; /* STATISTICS_SLOW_DIV_API_S_VER_2 */
+
+struct mvm_statistics_general_common {
+       __le32 temperature;   /* radio temperature */
+       __le32 temperature_m; /* radio voltage */
+       struct mvm_statistics_dbg dbg;
+       __le32 sleep_time;
+       __le32 slots_out;
+       __le32 slots_idle;
+       __le32 ttl_timestamp;
+       struct mvm_statistics_div div;
+       __le32 rx_enable_counter;
+       /*
+        * num_of_sos_states:
+        *  count the number of times we have to re-tune
+        *  in order to get out of bad PHY status
+        */
+       __le32 num_of_sos_states;
+} __packed; /* STATISTICS_GENERAL_API_S_VER_5 */
+
+struct mvm_statistics_rx_non_phy {
+       __le32 bogus_cts;       /* CTS received when not expecting CTS */
+       __le32 bogus_ack;       /* ACK received when not expecting ACK */
+       __le32 non_bssid_frames;        /* number of frames with BSSID that
+                                        * doesn't belong to the STA BSSID */
+       __le32 filtered_frames; /* count frames that were dumped in the
+                                * filtering process */
+       __le32 non_channel_beacons;     /* beacons with our bss id but not on
+                                        * our serving channel */
+       __le32 channel_beacons; /* beacons with our bss id and in our
+                                * serving channel */
+       __le32 num_missed_bcon; /* number of missed beacons */
+       __le32 adc_rx_saturation_time;  /* count in 0.8us units the time the
+                                        * ADC was in saturation */
+       __le32 ina_detection_search_time;/* total time (in 0.8us) searched
+                                         * for INA */
+       __le32 beacon_silence_rssi_a;   /* RSSI silence after beacon frame */
+       __le32 beacon_silence_rssi_b;   /* RSSI silence after beacon frame */
+       __le32 beacon_silence_rssi_c;   /* RSSI silence after beacon frame */
+       __le32 interference_data_flag;  /* flag for interference data
+                                        * availability. 1 when data is
+                                        * available. */
+       __le32 channel_load;            /* counts RX Enable time in uSec */
+       __le32 dsp_false_alarms;        /* DSP false alarm (both OFDM
+                                        * and CCK) counter */
+       __le32 beacon_rssi_a;
+       __le32 beacon_rssi_b;
+       __le32 beacon_rssi_c;
+       __le32 beacon_energy_a;
+       __le32 beacon_energy_b;
+       __le32 beacon_energy_c;
+       __le32 num_bt_kills;
+       __le32 mac_id;
+       __le32 directed_data_mpdu;
+} __packed; /* STATISTICS_RX_NON_PHY_API_S_VER_3 */
+
+struct mvm_statistics_rx_phy {
+       __le32 ina_cnt;
+       __le32 fina_cnt;
+       __le32 plcp_err;
+       __le32 crc32_err;
+       __le32 overrun_err;
+       __le32 early_overrun_err;
+       __le32 crc32_good;
+       __le32 false_alarm_cnt;
+       __le32 fina_sync_err_cnt;
+       __le32 sfd_timeout;
+       __le32 fina_timeout;
+       __le32 unresponded_rts;
+       __le32 rxe_frame_limit_overrun;
+       __le32 sent_ack_cnt;
+       __le32 sent_cts_cnt;
+       __le32 sent_ba_rsp_cnt;
+       __le32 dsp_self_kill;
+       __le32 mh_format_err;
+       __le32 re_acq_main_rssi_sum;
+       __le32 reserved;
+} __packed; /* STATISTICS_RX_PHY_API_S_VER_2 */
+
+struct mvm_statistics_rx_ht_phy {
+       __le32 plcp_err;
+       __le32 overrun_err;
+       __le32 early_overrun_err;
+       __le32 crc32_good;
+       __le32 crc32_err;
+       __le32 mh_format_err;
+       __le32 agg_crc32_good;
+       __le32 agg_mpdu_cnt;
+       __le32 agg_cnt;
+       __le32 unsupport_mcs;
+} __packed;  /* STATISTICS_HT_RX_PHY_API_S_VER_1 */
+
+#define MAX_CHAINS 3
+
+struct mvm_statistics_tx_non_phy_agg {
+       __le32 ba_timeout;
+       __le32 ba_reschedule_frames;
+       __le32 scd_query_agg_frame_cnt;
+       __le32 scd_query_no_agg;
+       __le32 scd_query_agg;
+       __le32 scd_query_mismatch;
+       __le32 frame_not_ready;
+       __le32 underrun;
+       __le32 bt_prio_kill;
+       __le32 rx_ba_rsp_cnt;
+       __s8 txpower[MAX_CHAINS];
+       __s8 reserved;
+       __le32 reserved2;
+} __packed; /* STATISTICS_TX_NON_PHY_AGG_API_S_VER_1 */
+
+struct mvm_statistics_tx_channel_width {
+       __le32 ext_cca_narrow_ch20[1];
+       __le32 ext_cca_narrow_ch40[2];
+       __le32 ext_cca_narrow_ch80[3];
+       __le32 ext_cca_narrow_ch160[4];
+       __le32 last_tx_ch_width_indx;
+       __le32 rx_detected_per_ch_width[4];
+       __le32 success_per_ch_width[4];
+       __le32 fail_per_ch_width[4];
+}; /* STATISTICS_TX_CHANNEL_WIDTH_API_S_VER_1 */
+
+struct mvm_statistics_tx {
+       __le32 preamble_cnt;
+       __le32 rx_detected_cnt;
+       __le32 bt_prio_defer_cnt;
+       __le32 bt_prio_kill_cnt;
+       __le32 few_bytes_cnt;
+       __le32 cts_timeout;
+       __le32 ack_timeout;
+       __le32 expected_ack_cnt;
+       __le32 actual_ack_cnt;
+       __le32 dump_msdu_cnt;
+       __le32 burst_abort_next_frame_mismatch_cnt;
+       __le32 burst_abort_missing_next_frame_cnt;
+       __le32 cts_timeout_collision;
+       __le32 ack_or_ba_timeout_collision;
+       struct mvm_statistics_tx_non_phy_agg agg;
+       struct mvm_statistics_tx_channel_width channel_width;
+} __packed; /* STATISTICS_TX_API_S_VER_4 */
+
+
+struct mvm_statistics_bt_activity {
+       __le32 hi_priority_tx_req_cnt;
+       __le32 hi_priority_tx_denied_cnt;
+       __le32 lo_priority_tx_req_cnt;
+       __le32 lo_priority_tx_denied_cnt;
+       __le32 hi_priority_rx_req_cnt;
+       __le32 hi_priority_rx_denied_cnt;
+       __le32 lo_priority_rx_req_cnt;
+       __le32 lo_priority_rx_denied_cnt;
+} __packed;  /* STATISTICS_BT_ACTIVITY_API_S_VER_1 */
+
+struct mvm_statistics_general {
+       struct mvm_statistics_general_common common;
+       __le32 beacon_filtered;
+       __le32 missed_beacons;
+       __s8 beacon_filter_everage_energy;
+       __s8 beacon_filter_reason;
+       __s8 beacon_filter_current_energy;
+       __s8 beacon_filter_reserved;
+       __le32 beacon_filter_delta_time;
+       struct mvm_statistics_bt_activity bt_activity;
+} __packed; /* STATISTICS_GENERAL_API_S_VER_5 */
+
+struct mvm_statistics_rx {
+       struct mvm_statistics_rx_phy ofdm;
+       struct mvm_statistics_rx_phy cck;
+       struct mvm_statistics_rx_non_phy general;
+       struct mvm_statistics_rx_ht_phy ofdm_ht;
+} __packed; /* STATISTICS_RX_API_S_VER_3 */
+
+/*
+ * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
+ *
+ * By default, uCode issues this notification after receiving a beacon
+ * while associated.  To disable this behavior, set DISABLE_NOTIF flag in the
+ * REPLY_STATISTICS_CMD 0x9c, above.
+ *
+ * Statistics counters continue to increment beacon after beacon, but are
+ * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD
+ * 0x9c with CLEAR_STATS bit set (see above).
+ *
+ * uCode also issues this notification during scans.  uCode clears statistics
+ * appropriately so that each notification contains statistics for only the
+ * one channel that has just been scanned.
+ */
+
+struct iwl_notif_statistics { /* STATISTICS_NTFY_API_S_VER_8 */
+       __le32 flag;
+       struct mvm_statistics_rx rx;
+       struct mvm_statistics_tx tx;
+       struct mvm_statistics_general general;
+} __packed;
+
 #endif /* __fw_api_h__ */
index e18c92dd60ecdd768c86e23fe6d0f6505f396018..cd7c0032cc583073bd5acaa7ceebbd19457aa1c4 100644 (file)
@@ -326,6 +326,17 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
        ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
        WARN_ON(ret);
 
+       /*
+        * Abort after reading the NVM in case RF kill is on; we will complete
+        * the init sequence later, when RF kill switches off.
+        */
+       if (iwl_mvm_is_radio_killed(mvm)) {
+               IWL_DEBUG_RF_KILL(mvm,
+                                 "jump over all phy activities due to RF kill\n");
+               iwl_remove_notification(&mvm->notif_wait, &calib_wait);
+               return 1;
+       }
+
        /* Send TX valid antennas before triggering calibrations */
        ret = iwl_send_tx_ant_cfg(mvm, iwl_fw_valid_tx_ant(mvm->fw));
        if (ret)
@@ -388,6 +399,8 @@ out:
 int iwl_mvm_up(struct iwl_mvm *mvm)
 {
        int ret, i;
+       struct ieee80211_channel *chan;
+       struct cfg80211_chan_def chandef;
 
        lockdep_assert_held(&mvm->mutex);
 
@@ -400,8 +413,16 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
                ret = iwl_run_init_mvm_ucode(mvm, false);
                if (ret && !iwlmvm_mod_params.init_dbg) {
                        IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
+                       /* this can't happen */
+                       if (WARN_ON(ret > 0))
+                               ret = -ERFKILL;
                        goto error;
                }
+               /* should stop & start HW since that INIT image just loaded */
+               iwl_trans_stop_hw(mvm->trans, false);
+               ret = iwl_trans_start_hw(mvm->trans);
+               if (ret)
+                       return ret;
        }
 
        if (iwlmvm_mod_params.init_dbg)
@@ -443,8 +464,22 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
        if (ret)
                goto error;
 
-       IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
+       /* Add all the PHY contexts */
+       chan = &mvm->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels[0];
+       cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
+       for (i = 0; i < NUM_PHY_CTX; i++) {
+               /*
+                * The channel used here isn't relevant as it's
+                * going to be overwritten in the other flows.
+                * For now use the first channel we have.
+                */
+               ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i],
+                                          &chandef, 1, 1);
+               if (ret)
+                       goto error;
+       }
 
+       IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
        return 0;
  error:
        iwl_trans_stop_device(mvm->trans);
index b2cc3d98e0f7aa2e89b744b17cd706ed20268cc6..273b0cc197ab4e2e7e8af2ea2b65d6baffcdb8a9 100644 (file)
@@ -193,14 +193,11 @@ static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
 u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm,
                                struct ieee80211_vif *vif)
 {
-       u32 qmask, ac;
+       u32 qmask = 0, ac;
 
        if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
                return BIT(IWL_MVM_OFFCHANNEL_QUEUE);
 
-       qmask = (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE) ?
-               BIT(vif->cab_queue) : 0;
-
        for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
                if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
                        qmask |= BIT(vif->hw_queue[ac]);
@@ -227,7 +224,7 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
                .found_vif = false,
        };
        u32 ac;
-       int ret;
+       int ret, i;
 
        /*
         * Allocate a MAC ID and a TSF for this MAC, along with the queues
@@ -335,6 +332,9 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
        mvmvif->bcast_sta.sta_id = IWL_MVM_STATION_COUNT;
        mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
 
+       for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++)
+               mvmvif->smps_requests[i] = IEEE80211_SMPS_AUTOMATIC;
+
        return 0;
 
 exit_fail:
@@ -362,7 +362,7 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                break;
        case NL80211_IFTYPE_AP:
                iwl_trans_ac_txq_enable(mvm->trans, vif->cab_queue,
-                                       IWL_MVM_TX_FIFO_VO);
+                                       IWL_MVM_TX_FIFO_MCAST);
                /* fall through */
        default:
                for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
@@ -550,6 +550,10 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
                cmd->ac[i].fifos_mask = BIT(iwl_mvm_ac_to_tx_fifo[i]);
        }
 
+       /* in AP mode, the MCAST FIFO takes the EDCA params from VO */
+       if (vif->type == NL80211_IFTYPE_AP)
+               cmd->ac[AC_VO].fifos_mask |= BIT(IWL_MVM_TX_FIFO_MCAST);
+
        if (vif->bss_conf.qos)
                cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
 
@@ -1047,3 +1051,28 @@ int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
                     rate);
        return 0;
 }
+
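+/*
+ * Beacon loss handling: the iterator below reports beacon loss to mac80211
+ * on the interface whose MAC id matches the MISSED_BEACONS_NOTIFICATION.
+ */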
+static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
+                                        struct ieee80211_vif *vif)
+{
+       u16 *id = _data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+       if (mvmvif->id == *id)
+               ieee80211_beacon_loss(vif);
+}
+
+int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
+                                   struct iwl_rx_cmd_buffer *rxb,
+                                   struct iwl_device_cmd *cmd)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_missed_beacons_notif *missed_beacons = (void *)pkt->data;
+       u16 id = (u16)le32_to_cpu(missed_beacons->mac_id);
+
+       ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+                                                  IEEE80211_IFACE_ITER_NORMAL,
+                                                  iwl_mvm_beacon_loss_iterator,
+                                                  &id);
+       return 0;
+}
index a5eb8c82f16a806fea43738629e738cdeaaba0b6..e08683b2053183f7e7ea9df370a37bdb4b70c2e4 100644 (file)
 static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
        {
                .max = 1,
-               .types = BIT(NL80211_IFTYPE_STATION) |
-                       BIT(NL80211_IFTYPE_AP),
+               .types = BIT(NL80211_IFTYPE_STATION),
        },
        {
                .max = 1,
-               .types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
+               .types = BIT(NL80211_IFTYPE_AP) |
+                       BIT(NL80211_IFTYPE_P2P_CLIENT) |
                        BIT(NL80211_IFTYPE_P2P_GO),
        },
        {
@@ -127,6 +127,17 @@ static const struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = {
 };
 #endif
 
+static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
+{
+       int i;
+
+       memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts));
+       for (i = 0; i < NUM_PHY_CTX; i++) {
+               mvm->phy_ctxts[i].id = i;
+               mvm->phy_ctxts[i].ref = 0;
+       }
+}
+
 int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 {
        struct ieee80211_hw *hw = mvm->hw;
@@ -141,7 +152,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
                    IEEE80211_HW_SUPPORTS_PS |
                    IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
                    IEEE80211_HW_AMPDU_AGGREGATION |
-                   IEEE80211_HW_TIMING_BEACON_ONLY;
+                   IEEE80211_HW_TIMING_BEACON_ONLY |
+                   IEEE80211_HW_CONNECTION_MONITOR;
 
        hw->queues = IWL_MVM_FIRST_AGG_QUEUE;
        hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
@@ -158,7 +170,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 
        hw->sta_data_size = sizeof(struct iwl_mvm_sta);
        hw->vif_data_size = sizeof(struct iwl_mvm_vif);
-       hw->chanctx_data_size = sizeof(struct iwl_mvm_phy_ctxt);
+       hw->chanctx_data_size = sizeof(u16);
 
        hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
                BIT(NL80211_IFTYPE_P2P_CLIENT) |
@@ -193,6 +205,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
                hw->wiphy->n_addresses++;
        }
 
+       iwl_mvm_reset_phy_ctxts(mvm);
+
        /* we create the 802.11 header and a max-length SSID element */
        hw->wiphy->max_scan_ie_len =
                mvm->fw->ucode_capa.max_probe_length - 24 - 34;
@@ -222,20 +236,20 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
            mvm->trans->ops->d3_suspend &&
            mvm->trans->ops->d3_resume &&
            device_can_wakeup(mvm->trans->dev)) {
-               hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
-                                         WIPHY_WOWLAN_DISCONNECT |
-                                         WIPHY_WOWLAN_EAP_IDENTITY_REQ |
-                                         WIPHY_WOWLAN_RFKILL_RELEASE;
+               mvm->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
+                                   WIPHY_WOWLAN_DISCONNECT |
+                                   WIPHY_WOWLAN_EAP_IDENTITY_REQ |
+                                   WIPHY_WOWLAN_RFKILL_RELEASE;
                if (!iwlwifi_mod_params.sw_crypto)
-                       hw->wiphy->wowlan.flags |=
-                               WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
-                               WIPHY_WOWLAN_GTK_REKEY_FAILURE |
-                               WIPHY_WOWLAN_4WAY_HANDSHAKE;
-
-               hw->wiphy->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
-               hw->wiphy->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
-               hw->wiphy->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
-               hw->wiphy->wowlan.tcp = &iwl_mvm_wowlan_tcp_support;
+                       mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+                                            WIPHY_WOWLAN_GTK_REKEY_FAILURE |
+                                            WIPHY_WOWLAN_4WAY_HANDSHAKE;
+
+               mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
+               mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
+               mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
+               mvm->wowlan.tcp = &iwl_mvm_wowlan_tcp_support;
+               hw->wiphy->wowlan = &mvm->wowlan;
        }
 #endif
 
@@ -252,8 +266,8 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 
-       if (test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status)) {
-               IWL_DEBUG_DROP(mvm, "Dropping - RF KILL\n");
+       if (iwl_mvm_is_radio_killed(mvm)) {
+               IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
                goto drop;
        }
 
@@ -345,8 +359,7 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
        iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
        spin_unlock_bh(&mvm->time_event_lock);
 
-       if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
-               mvmvif->phy_ctxt = NULL;
+       mvmvif->phy_ctxt = NULL;
 }
 
 static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
@@ -363,6 +376,9 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
                mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
                iwl_mvm_cleanup_iterator, mvm);
 
+       mvm->p2p_device_vif = NULL;
+
+       iwl_mvm_reset_phy_ctxts(mvm);
        memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
        memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
 
@@ -456,6 +472,20 @@ static void iwl_mvm_power_update_iterator(void *data, u8 *mac,
        iwl_mvm_power_update_mode(mvm, vif);
 }
 
+static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
+{
+       u16 i;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       for (i = 0; i < NUM_PHY_CTX; i++)
+               if (!mvm->phy_ctxts[i].ref)
+                       return &mvm->phy_ctxts[i];
+
+       IWL_ERR(mvm, "No available PHY context\n");
+       return NULL;
+}
+
 static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
                                     struct ieee80211_vif *vif)
 {
@@ -530,32 +560,34 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
         */
        iwl_mvm_power_update_mode(mvm, vif);
 
+       /* beacon filtering */
+       if (!mvm->bf_allowed_vif &&
+           vif->type == NL80211_IFTYPE_STATION && !vif->p2p){
+               mvm->bf_allowed_vif = mvmvif;
+               vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
+       }
+
+       ret = iwl_mvm_disable_beacon_filter(mvm, vif);
+       if (ret)
+               goto out_release;
+
        /*
         * P2P_DEVICE interface does not have a channel context assigned to it,
         * so a dedicated PHY context is allocated to it and the corresponding
         * MAC context is bound to it at this stage.
         */
        if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
-               struct ieee80211_channel *chan;
-               struct cfg80211_chan_def chandef;
 
-               mvmvif->phy_ctxt = &mvm->phy_ctxt_roc;
-
-               /*
-                * The channel used here isn't relevant as it's
-                * going to be overwritten as part of the ROC flow.
-                * For now use the first channel we have.
-                */
-               chan = &mvm->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels[0];
-               cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
-               ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->phy_ctxt,
-                                          &chandef, 1, 1);
-               if (ret)
+               mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
+               if (!mvmvif->phy_ctxt) {
+                       ret = -ENOSPC;
                        goto out_remove_mac;
+               }
 
+               iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
                ret = iwl_mvm_binding_add_vif(mvm, vif);
                if (ret)
-                       goto out_remove_phy;
+                       goto out_unref_phy;
 
                ret = iwl_mvm_add_bcast_sta(mvm, vif, &mvmvif->bcast_sta);
                if (ret)
@@ -571,27 +603,17 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
 
  out_unbind:
        iwl_mvm_binding_remove_vif(mvm, vif);
- out_remove_phy:
-       iwl_mvm_phy_ctxt_remove(mvm, mvmvif->phy_ctxt);
+ out_unref_phy:
+       iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
  out_remove_mac:
        mvmvif->phy_ctxt = NULL;
        iwl_mvm_mac_ctxt_remove(mvm, vif);
  out_release:
-       /*
-        * TODO: remove this temporary code.
-        * Currently MVM FW supports power management only on single MAC.
-        * Check if only one additional interface remains after releasing
-        * current one. Update power mode on the remaining interface.
-        */
        if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
                mvm->vif_count--;
-       IWL_DEBUG_MAC80211(mvm, "Currently %d interfaces active\n",
-                          mvm->vif_count);
-       if (mvm->vif_count == 1) {
-               ieee80211_iterate_active_interfaces(
-                                       mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-                                       iwl_mvm_power_update_iterator, mvm);
-       }
+       ieee80211_iterate_active_interfaces(
+               mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+               iwl_mvm_power_update_iterator, mvm);
        iwl_mvm_mac_ctxt_release(mvm, vif);
  out_unlock:
        mutex_unlock(&mvm->mutex);
@@ -629,8 +651,7 @@ static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
                 * By now, all the AC queues are empty. The AGG queues are
                 * empty too. We already got all the Tx responses for all the
                 * packets in the queues. The drain work can have been
-                * triggered. Flush it. This work item takes the mutex, so kill
-                * it before we take it.
+                * triggered. Flush it.
                 */
                flush_work(&mvm->sta_drained_wk);
        }
@@ -646,6 +667,11 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
 
        mutex_lock(&mvm->mutex);
 
+       if (mvm->bf_allowed_vif == mvmvif) {
+               mvm->bf_allowed_vif = NULL;
+               vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
+       }
+
        iwl_mvm_vif_dbgfs_clean(mvm, vif);
 
        /*
@@ -661,7 +687,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
                mvm->p2p_device_vif = NULL;
                iwl_mvm_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
                iwl_mvm_binding_remove_vif(mvm, vif);
-               iwl_mvm_phy_ctxt_remove(mvm, mvmvif->phy_ctxt);
+               iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
                mvmvif->phy_ctxt = NULL;
        }
 
@@ -748,7 +774,10 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
                        if (ret)
                                IWL_ERR(mvm, "failed to update quotas\n");
                }
-       } else if (changes & BSS_CHANGED_DTIM_PERIOD) {
+               ret = iwl_mvm_power_update_mode(mvm, vif);
+               if (ret)
+                       IWL_ERR(mvm, "failed to update power mode\n");
+       } else if (changes & BSS_CHANGED_BEACON_INFO) {
                /*
                 * We received a beacon _after_ association so
                 * remove the session protection.
@@ -756,19 +785,9 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
                iwl_mvm_remove_time_event(mvm, mvmvif,
                                          &mvmvif->time_event_data);
        } else if (changes & BSS_CHANGED_PS) {
-               /*
-                * TODO: remove this temporary code.
-                * Currently MVM FW supports power management only on single
-                * MAC. Avoid power mode update if more than one interface
-                * is active.
-                */
-               IWL_DEBUG_MAC80211(mvm, "Currently %d interfaces active\n",
-                                  mvm->vif_count);
-               if (mvm->vif_count == 1) {
-                       ret = iwl_mvm_power_update_mode(mvm, vif);
-                       if (ret)
-                               IWL_ERR(mvm, "failed to update power mode\n");
-               }
+               ret = iwl_mvm_power_update_mode(mvm, vif);
+               if (ret)
+                       IWL_ERR(mvm, "failed to update power mode\n");
        }
 }
 
@@ -999,9 +1018,13 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
                                             mvmvif->phy_ctxt->channel->band);
        } else if (old_state == IEEE80211_STA_ASSOC &&
                   new_state == IEEE80211_STA_AUTHORIZED) {
+               /* enable beacon filtering */
+               WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif));
                ret = 0;
        } else if (old_state == IEEE80211_STA_AUTHORIZED &&
                   new_state == IEEE80211_STA_ASSOC) {
+               /* disable beacon filtering */
+               WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif));
                ret = 0;
        } else if (old_state == IEEE80211_STA_ASSOC &&
                   new_state == IEEE80211_STA_AUTH) {
@@ -1167,29 +1190,107 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
                       enum ieee80211_roc_type type)
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct cfg80211_chan_def chandef;
-       int ret;
+       struct iwl_mvm_phy_ctxt *phy_ctxt;
+       int ret, i;
+
+       IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
+                          duration, type);
 
        if (vif->type != NL80211_IFTYPE_P2P_DEVICE) {
                IWL_ERR(mvm, "vif isn't a P2P_DEVICE: %d\n", vif->type);
                return -EINVAL;
        }
 
-       IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
-                          duration, type);
-
        mutex_lock(&mvm->mutex);
 
+       for (i = 0; i < NUM_PHY_CTX; i++) {
+               phy_ctxt = &mvm->phy_ctxts[i];
+               if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt)
+                       continue;
+
+               if (phy_ctxt->ref && channel == phy_ctxt->channel) {
+                       /*
+                        * Unbind the P2P_DEVICE from the current PHY context,
+                        * and if the PHY context is not used remove it.
+                        */
+                       ret = iwl_mvm_binding_remove_vif(mvm, vif);
+                       if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
+                               goto out_unlock;
+
+                       iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
+
+                       /* Bind the P2P_DEVICE to the current PHY Context */
+                       mvmvif->phy_ctxt = phy_ctxt;
+
+                       ret = iwl_mvm_binding_add_vif(mvm, vif);
+                       if (WARN(ret, "Failed binding P2P_DEVICE\n"))
+                               goto out_unlock;
+
+                       iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
+                       goto schedule_time_event;
+               }
+       }
+
+       /* Need to update the PHY context only if the ROC channel changed */
+       if (channel == mvmvif->phy_ctxt->channel)
+               goto schedule_time_event;
+
        cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
-       ret = iwl_mvm_phy_ctxt_changed(mvm, &mvm->phy_ctxt_roc,
-                                      &chandef, 1, 1);
 
+       /*
+        * Change the PHY context configuration as it is currently referenced
+        * only by the P2P Device MAC
+        */
+       if (mvmvif->phy_ctxt->ref == 1) {
+               ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt,
+                                              &chandef, 1, 1);
+               if (ret)
+                       goto out_unlock;
+       } else {
+               /*
+                * The PHY context is shared with other MACs. Need to remove the
+                * P2P Device from the binding, allocate a new PHY context and
+                * create a new binding
+                */
+               phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
+               if (!phy_ctxt) {
+                       ret = -ENOSPC;
+                       goto out_unlock;
+               }
+
+               ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef,
+                                              1, 1);
+               if (ret) {
+                       IWL_ERR(mvm, "Failed to change PHY context\n");
+                       goto out_unlock;
+               }
+
+               /* Unbind the P2P_DEVICE from the current PHY context */
+               ret = iwl_mvm_binding_remove_vif(mvm, vif);
+               if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
+                       goto out_unlock;
+
+               iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
+
+               /* Bind the P2P_DEVICE to the new allocated PHY context */
+               mvmvif->phy_ctxt = phy_ctxt;
+
+               ret = iwl_mvm_binding_add_vif(mvm, vif);
+               if (WARN(ret, "Failed binding P2P_DEVICE\n"))
+                       goto out_unlock;
+
+               iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
+       }
+
+schedule_time_event:
        /* Schedule the time events */
        ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type);
 
+out_unlock:
        mutex_unlock(&mvm->mutex);
        IWL_DEBUG_MAC80211(mvm, "leave\n");
-
        return ret;
 }
 
@@ -1211,15 +1312,30 @@ static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
                               struct ieee80211_chanctx_conf *ctx)
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
-       struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv;
+       u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
+       struct iwl_mvm_phy_ctxt *phy_ctxt;
        int ret;
 
+       IWL_DEBUG_MAC80211(mvm, "Add channel context\n");
+
        mutex_lock(&mvm->mutex);
+       phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
+       if (!phy_ctxt) {
+               ret = -ENOSPC;
+               goto out;
+       }
 
-       IWL_DEBUG_MAC80211(mvm, "Add PHY context\n");
-       ret = iwl_mvm_phy_ctxt_add(mvm, phy_ctxt, &ctx->def,
-                                  ctx->rx_chains_static,
-                                  ctx->rx_chains_dynamic);
+       ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->def,
+                                      ctx->rx_chains_static,
+                                      ctx->rx_chains_dynamic);
+       if (ret) {
+               IWL_ERR(mvm, "Failed to add PHY context\n");
+               goto out;
+       }
+
+       iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt);
+       *phy_ctxt_id = phy_ctxt->id;
+out:
        mutex_unlock(&mvm->mutex);
        return ret;
 }
@@ -1228,10 +1344,11 @@ static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw,
                                   struct ieee80211_chanctx_conf *ctx)
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
-       struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv;
+       u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
+       struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
 
        mutex_lock(&mvm->mutex);
-       iwl_mvm_phy_ctxt_remove(mvm, phy_ctxt);
+       iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt);
        mutex_unlock(&mvm->mutex);
 }
 
@@ -1240,7 +1357,16 @@ static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
                                   u32 changed)
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
-       struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv;
+       u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
+       struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
+
+       if (WARN_ONCE((phy_ctxt->ref > 1) &&
+                     (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH |
+                                  IEEE80211_CHANCTX_CHANGE_RX_CHAINS |
+                                  IEEE80211_CHANCTX_CHANGE_RADAR)),
+                     "Cannot change PHY. Ref=%d, changed=0x%X\n",
+                     phy_ctxt->ref, changed))
+               return;
 
        mutex_lock(&mvm->mutex);
        iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->def,
@@ -1254,13 +1380,14 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
                                      struct ieee80211_chanctx_conf *ctx)
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
-       struct iwl_mvm_phy_ctxt *phyctx = (void *)ctx->drv_priv;
+       u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
+       struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        int ret;
 
        mutex_lock(&mvm->mutex);
 
-       mvmvif->phy_ctxt = phyctx;
+       mvmvif->phy_ctxt = phy_ctxt;
 
        switch (vif->type) {
        case NL80211_IFTYPE_AP:
index 9f46b23801bc84187b6aba6ed4b89ca7149eafc6..c7409f159a36bf9d28314811b9d4a9fe64d42527 100644 (file)
@@ -88,6 +88,7 @@ enum iwl_mvm_tx_fifo {
        IWL_MVM_TX_FIFO_BE,
        IWL_MVM_TX_FIFO_VI,
        IWL_MVM_TX_FIFO_VO,
+       IWL_MVM_TX_FIFO_MCAST = 5,
 };
 
 extern struct ieee80211_ops iwl_mvm_hw_ops;
@@ -109,6 +110,7 @@ extern struct iwl_mvm_mod_params iwlmvm_mod_params;
 struct iwl_mvm_phy_ctxt {
        u16 id;
        u16 color;
+       u32 ref;
 
        /*
         * TODO: This should probably be removed. Currently here only for rate
@@ -149,6 +151,60 @@ enum iwl_power_scheme {
 
 #define IWL_CONN_MAX_LISTEN_INTERVAL   70
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+enum iwl_dbgfs_pm_mask {
+       MVM_DEBUGFS_PM_KEEP_ALIVE = BIT(0),
+       MVM_DEBUGFS_PM_SKIP_OVER_DTIM = BIT(1),
+       MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS = BIT(2),
+       MVM_DEBUGFS_PM_RX_DATA_TIMEOUT = BIT(3),
+       MVM_DEBUGFS_PM_TX_DATA_TIMEOUT = BIT(4),
+       MVM_DEBUGFS_PM_DISABLE_POWER_OFF = BIT(5),
+};
+
+struct iwl_dbgfs_pm {
+       u8 keep_alive_seconds;
+       u32 rx_data_timeout;
+       u32 tx_data_timeout;
+       bool skip_over_dtim;
+       u8 skip_dtim_periods;
+       bool disable_power_off;
+       int mask;
+};
+
+/* beacon filtering */
+
+enum iwl_dbgfs_bf_mask {
+       MVM_DEBUGFS_BF_ENERGY_DELTA = BIT(0),
+       MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA = BIT(1),
+       MVM_DEBUGFS_BF_ROAMING_STATE = BIT(2),
+       MVM_DEBUGFS_BF_TEMPERATURE_DELTA = BIT(3),
+       MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER = BIT(4),
+       MVM_DEBUGFS_BF_DEBUG_FLAG = BIT(5),
+       MVM_DEBUGFS_BF_ESCAPE_TIMER = BIT(6),
+       MVM_DEBUGFS_BA_ESCAPE_TIMER = BIT(7),
+       MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT = BIT(8),
+};
+
+struct iwl_dbgfs_bf {
+       u8 bf_energy_delta;
+       u8 bf_roaming_energy_delta;
+       u8 bf_roaming_state;
+       u8 bf_temperature_delta;
+       u8 bf_enable_beacon_filter;
+       u8 bf_debug_flag;
+       u32 bf_escape_timer;
+       u32 ba_escape_timer;
+       u8 ba_enable_beacon_abort;
+       int mask;
+};
+#endif
+
+enum iwl_mvm_smps_type_request {
+       IWL_MVM_SMPS_REQ_BT_COEX,
+       IWL_MVM_SMPS_REQ_TT,
+       NUM_IWL_MVM_SMPS_REQ,
+};
+
 /**
  * struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context
  * @id: between 0 and 3
@@ -163,6 +219,8 @@ enum iwl_power_scheme {
  * @bcast_sta: station used for broadcast packets. Used by the following
  *  vifs: P2P_DEVICE, GO and AP.
  * @beacon_skb: the skb used to hold the AP/GO beacon template
+ * @smps_requests: the SMPS requests of the different parts of the driver,
+ *     regarding the desired SMPS mode.
  */
 struct iwl_mvm_vif {
        u16 id;
@@ -172,6 +230,8 @@ struct iwl_mvm_vif {
        bool uploaded;
        bool ap_active;
        bool monitor_active;
+       /* indicate whether beacon filtering is enabled */
+       bool bf_enabled;
 
        u32 ap_beacon_time;
 
@@ -214,7 +274,11 @@ struct iwl_mvm_vif {
        struct dentry *dbgfs_dir;
        struct dentry *dbgfs_slink;
        void *dbgfs_data;
+       struct iwl_dbgfs_pm dbgfs_pm;
+       struct iwl_dbgfs_bf dbgfs_bf;
 #endif
+
+       enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ];
 };
 
 static inline struct iwl_mvm_vif *
@@ -223,12 +287,6 @@ iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
        return (void *)vif->drv_priv;
 }
 
-enum iwl_mvm_status {
-       IWL_MVM_STATUS_HW_RFKILL,
-       IWL_MVM_STATUS_ROC_RUNNING,
-       IWL_MVM_STATUS_IN_HW_RESTART,
-};
-
 enum iwl_scan_status {
        IWL_MVM_SCAN_NONE,
        IWL_MVM_SCAN_OS,
@@ -246,6 +304,63 @@ struct iwl_nvm_section {
        const u8 *data;
 };
 
+/*
+ * Tx-backoff threshold
+ * @temperature: The threshold in Celsius
+ * @backoff: The tx-backoff in uSec
+ */
+struct iwl_tt_tx_backoff {
+       s32 temperature;
+       u32 backoff;
+};
+
+#define TT_TX_BACKOFF_SIZE 6
+
+/**
+ * struct iwl_tt_params - thermal throttling parameters
+ * @ct_kill_entry: CT Kill entry threshold
+ * @ct_kill_exit: CT Kill exit threshold
+ * @ct_kill_duration: The time interval (in uSec) at which the driver needs
+ *     to check whether to exit CT Kill.
+ * @dynamic_smps_entry: Dynamic SMPS entry threshold
+ * @dynamic_smps_exit: Dynamic SMPS exit threshold
+ * @tx_protection_entry: TX protection entry threshold
+ * @tx_protection_exit: TX protection exit threshold
+ * @tx_backoff: Array of thresholds for tx-backoff, in ascending order.
+ * @support_ct_kill: Support CT Kill?
+ * @support_dynamic_smps: Support dynamic SMPS?
+ * @support_tx_protection: Support tx protection?
+ * @support_tx_backoff: Support tx-backoff?
+ */
+struct iwl_tt_params {
+       s32 ct_kill_entry;
+       s32 ct_kill_exit;
+       u32 ct_kill_duration;
+       s32 dynamic_smps_entry;
+       s32 dynamic_smps_exit;
+       s32 tx_protection_entry;
+       s32 tx_protection_exit;
+       struct iwl_tt_tx_backoff tx_backoff[TT_TX_BACKOFF_SIZE];
+       bool support_ct_kill;
+       bool support_dynamic_smps;
+       bool support_tx_protection;
+       bool support_tx_backoff;
+};
+
+/**
+ * struct iwl_mvm_tt_mgmt - Thermal Throttling Management structure
+ * @ct_kill_exit: worker to exit thermal kill
+ * @dynamic_smps: Is dynamic SMPS enabled due to thermal throttling?
+ * @tx_backoff: The current thermal throttling tx backoff in uSec.
+ * @params: Parameters to configure the thermal throttling algorithm.
+ */
+struct iwl_mvm_tt_mgmt {
+       struct delayed_work ct_kill_exit;
+       bool dynamic_smps;
+       u32 tx_backoff;
+       const struct iwl_tt_params *params;
+};
+
 struct iwl_mvm {
        /* for logger access */
        struct device *dev;
@@ -266,6 +381,12 @@ struct iwl_mvm {
 
        unsigned long status;
 
+       /*
+        * for beacon filtering -
+        * currently only one interface can be supported
+        */
+       struct iwl_mvm_vif *bf_allowed_vif;
+
        enum iwl_ucode_type cur_ucode;
        bool ucode_loaded;
        bool init_ucode_run;
@@ -313,7 +434,7 @@ struct iwl_mvm {
        bool prevent_power_down_d3;
 #endif
 
-       struct iwl_mvm_phy_ctxt phy_ctxt_roc;
+       struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX];
 
        struct list_head time_event_list;
        spinlock_t time_event_lock;
@@ -337,12 +458,23 @@ struct iwl_mvm {
        struct ieee80211_vif *p2p_device_vif;
 
 #ifdef CONFIG_PM_SLEEP
+       struct wiphy_wowlan_support wowlan;
        int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       bool d3_test_active;
+       bool store_d3_resume_sram;
+       void *d3_resume_sram;
+       u32 d3_test_pme_ptr;
+#endif
 #endif
 
        /* BT-Coex */
        u8 bt_kill_msk;
        struct iwl_bt_coex_profile_notif last_bt_notif;
+
+       /* Thermal Throttling and CTkill */
+       struct iwl_mvm_tt_mgmt thermal_throttle;
+       s32 temperature;        /* Celsius */
 };
 
 /* Extract MVM priv from op_mode and _hw */
@@ -352,6 +484,19 @@ struct iwl_mvm {
 #define IWL_MAC80211_GET_MVM(_hw)                      \
        IWL_OP_MODE_GET_MVM((struct iwl_op_mode *)((_hw)->priv))
 
+enum iwl_mvm_status {
+       IWL_MVM_STATUS_HW_RFKILL,
+       IWL_MVM_STATUS_HW_CTKILL,
+       IWL_MVM_STATUS_ROC_RUNNING,
+       IWL_MVM_STATUS_IN_HW_RESTART,
+};
+
+static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
+{
+       return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status) ||
+              test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
+}
+
 extern const u8 iwl_mvm_ac_to_tx_fifo[];
 
 struct iwl_rate_info {
@@ -443,8 +588,10 @@ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
 int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
                             struct cfg80211_chan_def *chandef,
                             u8 chains_static, u8 chains_dynamic);
-void iwl_mvm_phy_ctxt_remove(struct iwl_mvm *mvm,
-                            struct iwl_mvm_phy_ctxt *ctxt);
+void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm,
+                         struct iwl_mvm_phy_ctxt *ctxt);
+void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm,
+                           struct iwl_mvm_phy_ctxt *ctxt);
 
 /* MAC (virtual interface) programming */
 int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
@@ -459,6 +606,9 @@ int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
 int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
                            struct iwl_rx_cmd_buffer *rxb,
                            struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
+                                   struct iwl_rx_cmd_buffer *rxb,
+                                   struct iwl_device_cmd *cmd);
 
 /* Bindings */
 int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
@@ -523,6 +673,7 @@ void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
                              struct inet6_dev *idev);
 void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
                                     struct ieee80211_vif *vif, int idx);
+extern const struct file_operations iwl_dbgfs_d3_test_ops;
 
 /* BT Coex */
 int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm);
@@ -534,4 +685,31 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                           enum ieee80211_rssi_event rssi_event);
 void iwl_mvm_bt_coex_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 
+/* beacon filtering */
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void
+iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
+                                        struct iwl_beacon_filter_cmd *cmd);
+#else
+static inline void
+iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
+                                        struct iwl_beacon_filter_cmd *cmd)
+{}
+#endif
+int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
+                                struct ieee80211_vif *vif);
+int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
+                                 struct ieee80211_vif *vif);
+
+/* SMPS */
+void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                               enum iwl_mvm_smps_type_request req_type,
+                               enum ieee80211_smps_mode smps_request);
+
+/* Thermal management and CT-kill */
+void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
+void iwl_mvm_tt_initialize(struct iwl_mvm *mvm);
+void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
+void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state);
+
 #endif /* __IWL_MVM_H__ */
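For illustration, the new struct iwl_tt_tx_backoff declared above holds thermal-throttling thresholds in ascending temperature order, each with a Tx backoff in uSec. A minimal standalone sketch of how such a table can be scanned to pick the backoff for the current temperature; the threshold and backoff values below are invented for the example and are not taken from the driver:

#include <stdint.h>
#include <stdio.h>

struct tt_tx_backoff {
	int32_t temperature;	/* threshold in Celsius */
	uint32_t backoff;	/* Tx backoff in uSec */
};

/* hypothetical table, sorted by ascending temperature */
static const struct tt_tx_backoff backoff_tbl[] = {
	{ 104,  200 },
	{ 108,  600 },
	{ 112, 1200 },
	{ 116, 2000 },
	{ 119, 4000 },
	{ 123, 8000 },
};

/* return the backoff of the highest threshold the temperature has reached */
static uint32_t pick_backoff(int32_t temp)
{
	uint32_t backoff = 0;
	size_t i;

	for (i = 0; i < sizeof(backoff_tbl) / sizeof(backoff_tbl[0]); i++) {
		if (temp < backoff_tbl[i].temperature)
			break;
		backoff = backoff_tbl[i].backoff;
	}
	return backoff;
}

int main(void)
{
	printf("backoff at 110 C: %u uSec\n", (unsigned)pick_backoff(110)); /* 600 */
	return 0;
}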
index b8ec02f89acc86848a8514f0679c3369abf9ef91..edb94ea316545f439a18e8c16e0952066336e2a2 100644 (file)
@@ -60,6 +60,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  *****************************************************************************/
+#include <linux/firmware.h>
 #include "iwl-trans.h"
 #include "mvm.h"
 #include "iwl-eeprom-parse.h"
@@ -75,31 +76,56 @@ static const int nvm_to_read[] = {
 };
 
 /* Default NVM size to read */
-#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024);
+#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024)
+#define IWL_MAX_NVM_SECTION_SIZE 6000
 
-static inline void iwl_nvm_fill_read(struct iwl_nvm_access_cmd *cmd,
-                                    u16 offset, u16 length, u16 section)
+#define NVM_WRITE_OPCODE 1
+#define NVM_READ_OPCODE 0
+
+/*
+ * Prepare the NVM host command with the pointers to the NVM buffer
+ * and send it to the FW.
+ */
+static int iwl_nvm_write_chunk(struct iwl_mvm *mvm, u16 section,
+                              u16 offset, u16 length, const u8 *data)
 {
-       cmd->offset = cpu_to_le16(offset);
-       cmd->length = cpu_to_le16(length);
-       cmd->type = cpu_to_le16(section);
+       struct iwl_nvm_access_cmd nvm_access_cmd = {
+               .offset = cpu_to_le16(offset),
+               .length = cpu_to_le16(length),
+               .type = cpu_to_le16(section),
+               .op_code = NVM_WRITE_OPCODE,
+       };
+       struct iwl_host_cmd cmd = {
+               .id = NVM_ACCESS_CMD,
+               .len = { sizeof(struct iwl_nvm_access_cmd), length },
+               .flags = CMD_SYNC | CMD_SEND_IN_RFKILL,
+               .data = { &nvm_access_cmd, data },
+               /* data may come from vmalloc, so use _DUP */
+               .dataflags = { 0, IWL_HCMD_DFL_DUP },
+       };
+
+       return iwl_mvm_send_cmd(mvm, &cmd);
 }
 
 static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
                              u16 offset, u16 length, u8 *data)
 {
-       struct iwl_nvm_access_cmd nvm_access_cmd = {};
+       struct iwl_nvm_access_cmd nvm_access_cmd = {
+               .offset = cpu_to_le16(offset),
+               .length = cpu_to_le16(length),
+               .type = cpu_to_le16(section),
+               .op_code = NVM_READ_OPCODE,
+       };
        struct iwl_nvm_access_resp *nvm_resp;
        struct iwl_rx_packet *pkt;
        struct iwl_host_cmd cmd = {
                .id = NVM_ACCESS_CMD,
-               .flags = CMD_SYNC | CMD_WANT_SKB,
+               .flags = CMD_SYNC | CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
                .data = { &nvm_access_cmd, },
        };
        int ret, bytes_read, offset_read;
        u8 *resp_data;
 
-       iwl_nvm_fill_read(&nvm_access_cmd, offset, length, section);
        cmd.len[0] = sizeof(struct iwl_nvm_access_cmd);
 
        ret = iwl_mvm_send_cmd(mvm, &cmd);
@@ -144,6 +170,30 @@ exit:
        return ret;
 }
 
+static int iwl_nvm_write_section(struct iwl_mvm *mvm, u16 section,
+                                const u8 *data, u16 length)
+{
+       int offset = 0;
+
+       /* copy data in chunks of 2k (and remainder if any) */
+
+       while (offset < length) {
+               int chunk_size, ret;
+
+               chunk_size = min(IWL_NVM_DEFAULT_CHUNK_SIZE,
+                                length - offset);
+
+               ret = iwl_nvm_write_chunk(mvm, section, offset,
+                                         chunk_size, data + offset);
+               if (ret < 0)
+                       return ret;
+
+               offset += chunk_size;
+       }
+
+       return 0;
+}
+
 /*
  * Reads an NVM section completely.
  * NICs prior to 7000 family doesn't have a real NVM, but just read
@@ -177,7 +227,8 @@ static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section,
                offset += ret;
        }
 
-       IWL_INFO(mvm, "NVM section %d read completed\n", section);
+       IWL_DEBUG_EEPROM(mvm->trans->dev,
+                        "NVM section %d read completed\n", section);
        return offset;
 }
 
@@ -200,7 +251,130 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
        hw = (const __le16 *)sections[NVM_SECTION_TYPE_HW].data;
        sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
        calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
-       return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib);
+       return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib,
+                                 iwl_fw_valid_tx_ant(mvm->fw),
+                                 iwl_fw_valid_rx_ant(mvm->fw));
+}
+
+#define MAX_NVM_FILE_LEN       16384
+
+/*
+ * HOW TO CREATE THE NVM FILE FORMAT:
+ * ------------------------------
+ * 1. create hex file, format:
+ *      3800 -> header
+ *      0000 -> header
+ *      5a40 -> data
+ *
+ *   rev - 6 bit (word1)
+ *   len - 10 bit (word1)
+ *   id - 4 bit (word2)
+ *   rsv - 12 bit (word2)
+ *
+ * 2. flip 8 bits with 8 bits per line to get the right NVM file format
+ *
+ * 3. create binary file from the hex file
+ *
+ * 4. save as "iNVM_xxx.bin" under /lib/firmware
+ */
+static int iwl_mvm_load_external_nvm(struct iwl_mvm *mvm)
+{
+       int ret, section_id, section_size;
+       const struct firmware *fw_entry;
+       const struct {
+               __le16 word1;
+               __le16 word2;
+               u8 data[];
+       } *file_sec;
+       const u8 *eof;
+
+#define NVM_WORD1_LEN(x) (8 * (x & 0x03FF))
+#define NVM_WORD2_ID(x) (x >> 12)
+
+       /*
+        * Obtain NVM image via request_firmware. Since we already used
+        * request_firmware_nowait() for the firmware binary load and only
+        * get here after that we assume the NVM request can be satisfied
+        * synchronously.
+        */
+       ret = request_firmware(&fw_entry, iwlwifi_mod_params.nvm_file,
+                              mvm->trans->dev);
+       if (ret) {
+               IWL_ERR(mvm, "ERROR: %s isn't available %d\n",
+                       iwlwifi_mod_params.nvm_file, ret);
+               return ret;
+       }
+
+       IWL_INFO(mvm, "Loaded NVM file %s (%zu bytes)\n",
+                iwlwifi_mod_params.nvm_file, fw_entry->size);
+
+       if (fw_entry->size < sizeof(*file_sec)) {
+               IWL_ERR(mvm, "NVM file too small\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (fw_entry->size > MAX_NVM_FILE_LEN) {
+               IWL_ERR(mvm, "NVM file too large\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       eof = fw_entry->data + fw_entry->size;
+
+       file_sec = (void *)fw_entry->data;
+
+       while (true) {
+               if (file_sec->data > eof) {
+                       IWL_ERR(mvm,
+                               "ERROR - NVM file too short for section header\n");
+                       ret = -EINVAL;
+                       break;
+               }
+
+               /* check for EOF marker */
+               if (!file_sec->word1 && !file_sec->word2) {
+                       ret = 0;
+                       break;
+               }
+
+               section_size = 2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1));
+               section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2));
+
+               if (section_size > IWL_MAX_NVM_SECTION_SIZE) {
+                       IWL_ERR(mvm, "ERROR - section too large (%d)\n",
+                               section_size);
+                       ret = -EINVAL;
+                       break;
+               }
+
+               if (!section_size) {
+                       IWL_ERR(mvm, "ERROR - section empty\n");
+                       ret = -EINVAL;
+                       break;
+               }
+
+               if (file_sec->data + section_size > eof) {
+                       IWL_ERR(mvm,
+                               "ERROR - NVM file too short for section (%d bytes)\n",
+                               section_size);
+                       ret = -EINVAL;
+                       break;
+               }
+
+               ret = iwl_nvm_write_section(mvm, section_id, file_sec->data,
+                                           section_size);
+               if (ret < 0) {
+                       IWL_ERR(mvm, "iwl_mvm_send_cmd failed: %d\n", ret);
+                       break;
+               }
+
+               /* advance to the next section */
+               file_sec = (void *)(file_sec->data + section_size);
+       }
+out:
+       release_firmware(fw_entry);
+       return ret;
 }
 
 int iwl_nvm_init(struct iwl_mvm *mvm)
@@ -208,6 +382,17 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
        int ret, i, section;
        u8 *nvm_buffer, *temp;
 
+       /* load external NVM if configured */
+       if (iwlwifi_mod_params.nvm_file) {
+               /* move to External NVM flow */
+               ret = iwl_mvm_load_external_nvm(mvm);
+               if (ret)
+                       return ret;
+       }
+
+       /* Read From FW NVM */
+       IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");
+
        /* TODO: find correct NVM max size for a section */
        nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size,
                             GFP_KERNEL);
@@ -231,8 +416,9 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
        if (ret < 0)
                return ret;
 
-       ret = 0;
        mvm->nvm_data = iwl_parse_nvm_sections(mvm);
+       if (!mvm->nvm_data)
+               return -ENODATA;
 
-       return ret;
+       return 0;
 }
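For illustration, the external-NVM loader above walks a file made of sections, each preceded by two little-endian 16-bit words: word1 carries the section length (low 10 bits) and word2 the section id (top 4 bits); an all-zero header marks the end of the file. A minimal standalone sketch of that walk, reusing the NVM_WORD1_LEN/NVM_WORD2_ID macros from the patch; the image bytes in main() are invented for the example:

#include <stdint.h>
#include <stdio.h>

#define NVM_WORD1_LEN(x)	(8 * ((x) & 0x03FF))	/* length field, as in the patch */
#define NVM_WORD2_ID(x)		((x) >> 12)		/* section id field */

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

/* walk an NVM-style image and print each section until the EOF marker */
static int parse_nvm_image(const uint8_t *buf, size_t len)
{
	size_t off = 0;

	while (off + 4 <= len) {
		uint16_t w1 = get_le16(buf + off);
		uint16_t w2 = get_le16(buf + off + 2);
		size_t sec_len = 2 * NVM_WORD1_LEN(w1);	/* section size in bytes */

		if (!w1 && !w2)		/* EOF marker */
			return 0;

		if (off + 4 + sec_len > len) {
			fprintf(stderr, "file too short for section\n");
			return -1;
		}

		printf("section id=%u, %zu bytes\n",
		       (unsigned)NVM_WORD2_ID(w2), sec_len);
		off += 4 + sec_len;	/* skip header and data */
	}
	fprintf(stderr, "missing EOF marker\n");
	return -1;
}

int main(void)
{
	/* one 16-byte section with id 5, followed by an all-zero EOF marker */
	uint8_t img[4 + 16 + 4] = { 0x01, 0x00, 0x00, 0x50 };

	return parse_nvm_image(img, sizeof(img));
}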
index b29c31a41594ecbfd66989e0eaaa020758007aac..af79a14063a9bcffbb0a624d3b93f97160dbe057 100644 (file)
@@ -215,17 +215,22 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
        RX_HANDLER(REPLY_RX_PHY_CMD, iwl_mvm_rx_rx_phy_cmd, false),
        RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, false),
        RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false),
+
+       RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, true),
+       RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, false),
+       RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, true),
+
        RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false),
 
        RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false),
        RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, false),
 
-       RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, true),
-       RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, false),
-
        RX_HANDLER(RADIO_VERSION_NOTIFICATION, iwl_mvm_rx_radio_ver, false),
        RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false),
 
+       RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
+                  false),
+
        RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, false),
 };
 #undef RX_HANDLER
@@ -288,11 +293,14 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
        CMD(NET_DETECT_HOTSPOTS_CMD),
        CMD(NET_DETECT_HOTSPOTS_QUERY_CMD),
        CMD(CARD_STATE_NOTIFICATION),
+       CMD(MISSED_BEACONS_NOTIFICATION),
        CMD(BT_COEX_PRIO_TABLE),
        CMD(BT_COEX_PROT_ENV),
        CMD(BT_PROFILE_NOTIFICATION),
        CMD(BT_CONFIG),
        CMD(MCAST_FILTER_CMD),
+       CMD(REPLY_BEACON_FILTERING_CMD),
+       CMD(REPLY_THERMAL_MNG_BACKOFF),
 };
 #undef CMD
 
@@ -393,10 +401,13 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        if (err)
                goto out_free;
 
+       iwl_mvm_tt_initialize(mvm);
+
        mutex_lock(&mvm->mutex);
        err = iwl_run_init_mvm_ucode(mvm, true);
        mutex_unlock(&mvm->mutex);
-       if (err && !iwlmvm_mod_params.init_dbg) {
+       /* returns 0 if successful, 1 if success but in rfkill */
+       if (err < 0 && !iwlmvm_mod_params.init_dbg) {
                IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
                goto out_free;
        }
@@ -439,10 +450,16 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
 
        iwl_mvm_leds_exit(mvm);
 
+       iwl_mvm_tt_exit(mvm);
+
        ieee80211_unregister_hw(mvm->hw);
 
        kfree(mvm->scan_cmd);
 
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS)
+       kfree(mvm->d3_resume_sram);
+#endif
+
        iwl_trans_stop_hw(mvm->trans, true);
 
        iwl_phy_db_free(mvm->phy_db);
@@ -589,6 +606,16 @@ static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
        ieee80211_wake_queue(mvm->hw, mq);
 }
 
+void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
+{
+       if (state)
+               set_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
+       else
+               clear_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
+
+       wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
+}
+
 static void iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
 {
        struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
@@ -598,7 +625,7 @@ static void iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
        else
                clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
 
-       wiphy_rfkill_set_hw_state(mvm->hw->wiphy, state);
+       wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
 }
 
 static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
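For illustration, the CT-kill change above means the radio is reported to mac80211 as killed whenever either the RF-kill or the CT-kill status bit is set, so clearing one of them no longer un-kills the radio while the other is still active. A minimal standalone sketch of that combined state; the bit names are simplified for the example:

#include <stdbool.h>
#include <stdio.h>

enum { STATUS_HW_RFKILL, STATUS_HW_CTKILL };

static unsigned long status;

static bool radio_killed(void)
{
	return (status & (1UL << STATUS_HW_RFKILL)) ||
	       (status & (1UL << STATUS_HW_CTKILL));
}

static void set_kill_state(int bit, bool on)
{
	if (on)
		status |= 1UL << bit;
	else
		status &= ~(1UL << bit);
	/* this combined value is what would be reported to mac80211 */
	printf("radio killed: %d\n", radio_killed());
}

int main(void)
{
	set_kill_state(STATUS_HW_CTKILL, true);		/* killed: 1 */
	set_kill_state(STATUS_HW_RFKILL, true);		/* killed: 1 */
	set_kill_state(STATUS_HW_CTKILL, false);	/* still 1, RF-kill remains */
	set_kill_state(STATUS_HW_RFKILL, false);	/* killed: 0 */
	return 0;
}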
index a28a1d1f23eb8caba29ff4501c1962a527f6e50d..a8652ddd6bedb54ed7bd045d44fc414c618b5ce9 100644 (file)
@@ -195,21 +195,6 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
        return ret;
 }
 
-
-struct phy_ctx_used_data {
-       unsigned long used[BITS_TO_LONGS(NUM_PHY_CTX)];
-};
-
-static void iwl_mvm_phy_ctx_used_iter(struct ieee80211_hw *hw,
-                                     struct ieee80211_chanctx_conf *ctx,
-                                     void *_data)
-{
-       struct phy_ctx_used_data *data = _data;
-       struct iwl_mvm_phy_ctxt *phy_ctxt = (void *)ctx->drv_priv;
-
-       __set_bit(phy_ctxt->id, data->used);
-}
-
 /*
  * Send a command to add a PHY context based on the current HW configuration.
  */
@@ -217,34 +202,28 @@ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
                         struct cfg80211_chan_def *chandef,
                         u8 chains_static, u8 chains_dynamic)
 {
-       struct phy_ctx_used_data data = {
-               .used = { },
-       };
-
-       /*
-        * If this is a regular PHY context (not the ROC one)
-        * skip the ROC PHY context's ID.
-        */
-       if (ctxt != &mvm->phy_ctxt_roc)
-               __set_bit(mvm->phy_ctxt_roc.id, data.used);
+       int ret;
 
+       WARN_ON(!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+               ctxt->ref);
        lockdep_assert_held(&mvm->mutex);
-       ctxt->color++;
 
-       if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
-               ieee80211_iter_chan_contexts_atomic(
-                       mvm->hw, iwl_mvm_phy_ctx_used_iter, &data);
+       ctxt->channel = chandef->chan;
+       ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
+                                    chains_static, chains_dynamic,
+                                    FW_CTXT_ACTION_ADD, 0);
 
-               ctxt->id = find_first_zero_bit(data.used, NUM_PHY_CTX);
-               if (WARN_ONCE(ctxt->id == NUM_PHY_CTX,
-                             "Failed to init PHY context - no free ID!\n"))
-                       return -EIO;
-       }
+       return ret;
+}
 
-       ctxt->channel = chandef->chan;
-       return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
-                                     chains_static, chains_dynamic,
-                                     FW_CTXT_ACTION_ADD, 0);
+/*
+ * Update the number of references to the given PHY context. This is valid only
+ * if the PHY context was already created, i.e., its reference count > 0.
+ */
+void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
+{
+       lockdep_assert_held(&mvm->mutex);
+       ctxt->ref++;
 }
 
 /*
@@ -264,23 +243,12 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
                                      FW_CTXT_ACTION_MODIFY, 0);
 }
 
-/*
- * Send a command to the FW to remove the given phy context.
- * Once the command is sent, regardless of success or failure, the context is
- * marked as invalid
- */
-void iwl_mvm_phy_ctxt_remove(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
+void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
 {
-       struct iwl_phy_context_cmd cmd;
-       int ret;
-
        lockdep_assert_held(&mvm->mutex);
 
-       iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, FW_CTXT_ACTION_REMOVE, 0);
-       ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, CMD_SYNC,
-                                  sizeof(struct iwl_phy_context_cmd),
-                                  &cmd);
-       if (ret)
-               IWL_ERR(mvm, "Failed to send PHY remove: ctxt id=%d\n",
-                       ctxt->id);
+       if (WARN_ON_ONCE(!ctxt))
+               return;
+
+       ctxt->ref--;
 }
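For illustration, with the hunks above the driver keeps a fixed array of PHY contexts and stores only an index in each channel context's drv_priv; contexts are reference-counted instead of being added and removed per channel context. A minimal standalone sketch of that lookup and ref/unref flow; the array size and field names are simplified for the example:

#include <stdint.h>
#include <stdio.h>

#define NUM_PHY_CTX 8

struct phy_ctxt {
	uint16_t id;
	uint32_t ref;
};

static struct phy_ctxt phy_ctxts[NUM_PHY_CTX];

/* mac80211-style per-chanctx private area: only the context index lives there */
struct chanctx {
	uint16_t drv_priv;	/* index into phy_ctxts[] */
};

static struct phy_ctxt *ctx_to_phy(const struct chanctx *ctx)
{
	return &phy_ctxts[ctx->drv_priv];
}

static void phy_ctxt_ref(struct phy_ctxt *p)
{
	p->ref++;
}

static void phy_ctxt_unref(struct phy_ctxt *p)
{
	if (p->ref)		/* keep the sketch safe against underflow */
		p->ref--;
}

int main(void)
{
	struct chanctx a = { .drv_priv = 3 }, b = { .drv_priv = 3 };

	phy_ctxts[3].id = 3;
	phy_ctxt_ref(ctx_to_phy(&a));	/* two chanctxs share one PHY context */
	phy_ctxt_ref(ctx_to_phy(&b));
	phy_ctxt_unref(ctx_to_phy(&a));
	printf("phy ctxt %d refs: %u\n",
	       (int)phy_ctxts[3].id, (unsigned)phy_ctxts[3].ref);	/* refs: 1 */
	return 0;
}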
index ed77e437aac49d513525b12725f3d70baad461e9..3760a33ca3a40cbb13a266cd7c96912017bfce1e 100644 (file)
 
 #define POWER_KEEP_ALIVE_PERIOD_SEC    25
 
+static int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
+                                         struct iwl_beacon_filter_cmd *cmd)
+{
+       int ret;
+
+       ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, CMD_SYNC,
+                                  sizeof(struct iwl_beacon_filter_cmd), cmd);
+
+       if (!ret) {
+               IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n",
+                               cmd->ba_enable_beacon_abort);
+               IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n",
+                               cmd->ba_escape_timer);
+               IWL_DEBUG_POWER(mvm, "bf_debug_flag is: %d\n",
+                               cmd->bf_debug_flag);
+               IWL_DEBUG_POWER(mvm, "bf_enable_beacon_filter is: %d\n",
+                               cmd->bf_enable_beacon_filter);
+               IWL_DEBUG_POWER(mvm, "bf_energy_delta is: %d\n",
+                               cmd->bf_energy_delta);
+               IWL_DEBUG_POWER(mvm, "bf_escape_timer is: %d\n",
+                               cmd->bf_escape_timer);
+               IWL_DEBUG_POWER(mvm, "bf_roaming_energy_delta is: %d\n",
+                               cmd->bf_roaming_energy_delta);
+               IWL_DEBUG_POWER(mvm, "bf_roaming_state is: %d\n",
+                               cmd->bf_roaming_state);
+               IWL_DEBUG_POWER(mvm, "bf_temperature_delta is: %d\n",
+                               cmd->bf_temperature_delta);
+       }
+       return ret;
+}
+
+static int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
+                                      struct ieee80211_vif *vif, bool enable)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_beacon_filter_cmd cmd = {
+               IWL_BF_CMD_CONFIG_DEFAULTS,
+               .bf_enable_beacon_filter = 1,
+               .ba_enable_beacon_abort = enable,
+       };
+
+       if (!mvmvif->bf_enabled)
+               return 0;
+
+       iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
+       return iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
+}
+
 static void iwl_mvm_power_log(struct iwl_mvm *mvm,
                              struct iwl_powertable_cmd *cmd)
 {
@@ -91,6 +139,9 @@ static void iwl_mvm_power_log(struct iwl_mvm *mvm,
                                le32_to_cpu(cmd->tx_data_timeout));
                IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
                                cmd->lprx_rssi_threshold);
+               if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK))
+                       IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n",
+                                       le32_to_cpu(cmd->skip_dtim_periods));
        }
 }
 
@@ -103,6 +154,8 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        int dtimper, dtimper_msec;
        int keep_alive;
        bool radar_detect = false;
+       struct iwl_mvm_vif *mvmvif __maybe_unused =
+               iwl_mvm_vif_from_mac80211(vif);
 
        /*
         * Regardless of power management state the driver must set
@@ -115,7 +168,14 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                return;
 
        cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
+       if (!vif->bss_conf.assoc)
+               cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
+           mvmvif->dbgfs_pm.disable_power_off)
+               cmd->flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
+#endif
        if (!vif->bss_conf.ps)
                return;
 
@@ -135,8 +195,11 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
        /* Check skip over DTIM conditions */
        if (!radar_detect && (dtimper <= 10) &&
-           (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP))
+           (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP ||
+            mvm->cur_ucode == IWL_UCODE_WOWLAN)) {
                cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+               cmd->skip_dtim_periods = cpu_to_le32(3);
+       }
 
        /* Check that keep alive period is at least 3 * DTIM */
        dtimper_msec = dtimper * vif->bss_conf.beacon_int;
@@ -145,27 +208,76 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
        cmd->keep_alive_seconds = keep_alive;
 
-       cmd->rx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
-       cmd->tx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
+       if (mvm->cur_ucode != IWL_UCODE_WOWLAN) {
+               cmd->rx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
+               cmd->tx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
+       } else {
+               cmd->rx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC);
+               cmd->tx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC);
+       }
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE)
+               cmd->keep_alive_seconds = mvmvif->dbgfs_pm.keep_alive_seconds;
+       if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_OVER_DTIM) {
+               if (mvmvif->dbgfs_pm.skip_over_dtim)
+                       cmd->flags |=
+                               cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+               else
+                       cmd->flags &=
+                               cpu_to_le16(~POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+       }
+       if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_RX_DATA_TIMEOUT)
+               cmd->rx_data_timeout =
+                       cpu_to_le32(mvmvif->dbgfs_pm.rx_data_timeout);
+       if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_TX_DATA_TIMEOUT)
+               cmd->tx_data_timeout =
+                       cpu_to_le32(mvmvif->dbgfs_pm.tx_data_timeout);
+       if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS)
+               cmd->skip_dtim_periods =
+                       cpu_to_le32(mvmvif->dbgfs_pm.skip_dtim_periods);
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
 }
 
 int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 {
+       int ret;
+       bool ba_enable;
        struct iwl_powertable_cmd cmd = {};
 
        if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
                return 0;
 
+       /*
+        * TODO: The following vif_count verification is a temporary condition.
+        * Avoid power mode update if more than one interface is currently
+        * active. Remove this condition when the FW supports power management
+        * on multiple MACs.
+        */
+       IWL_DEBUG_POWER(mvm, "Currently %d interfaces active\n",
+                       mvm->vif_count);
+       if (mvm->vif_count > 1)
+               return 0;
+
        iwl_mvm_power_build_cmd(mvm, vif, &cmd);
        iwl_mvm_power_log(mvm, &cmd);
 
-       return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
-                                   sizeof(cmd), &cmd);
+       ret = iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
+                                  sizeof(cmd), &cmd);
+       if (ret)
+               return ret;
+
+       ba_enable = !!(cmd.flags &
+                      cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
+
+       return iwl_mvm_update_beacon_abort(mvm, vif, ba_enable);
 }
 
 int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 {
        struct iwl_powertable_cmd cmd = {};
+       struct iwl_mvm_vif *mvmvif __maybe_unused =
+               iwl_mvm_vif_from_mac80211(vif);
 
        if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
                return 0;
@@ -173,8 +285,82 @@ int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
        if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
                cmd.flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
+           mvmvif->dbgfs_pm.disable_power_off)
+               cmd.flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
+#endif
        iwl_mvm_power_log(mvm, &cmd);
 
        return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_ASYNC,
                                    sizeof(cmd), &cmd);
 }
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void
+iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
+                                        struct iwl_beacon_filter_cmd *cmd)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf;
+
+       if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ENERGY_DELTA)
+               cmd->bf_energy_delta = dbgfs_bf->bf_energy_delta;
+       if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA)
+               cmd->bf_roaming_energy_delta =
+                                dbgfs_bf->bf_roaming_energy_delta;
+       if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_STATE)
+               cmd->bf_roaming_state = dbgfs_bf->bf_roaming_state;
+       if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMPERATURE_DELTA)
+               cmd->bf_temperature_delta = dbgfs_bf->bf_temperature_delta;
+       if (dbgfs_bf->mask & MVM_DEBUGFS_BF_DEBUG_FLAG)
+               cmd->bf_debug_flag = dbgfs_bf->bf_debug_flag;
+       if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ESCAPE_TIMER)
+               cmd->bf_escape_timer = cpu_to_le32(dbgfs_bf->bf_escape_timer);
+       if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ESCAPE_TIMER)
+               cmd->ba_escape_timer = cpu_to_le32(dbgfs_bf->ba_escape_timer);
+       if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT)
+               cmd->ba_enable_beacon_abort = dbgfs_bf->ba_enable_beacon_abort;
+}
+#endif
+
+int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
+                                struct ieee80211_vif *vif)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_beacon_filter_cmd cmd = {
+               IWL_BF_CMD_CONFIG_DEFAULTS,
+               .bf_enable_beacon_filter = 1,
+       };
+       int ret;
+
+       if (mvmvif != mvm->bf_allowed_vif ||
+           vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+               return 0;
+
+       iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
+       ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
+
+       if (!ret)
+               mvmvif->bf_enabled = true;
+
+       return ret;
+}
+
+int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
+                                 struct ieee80211_vif *vif)
+{
+       struct iwl_beacon_filter_cmd cmd = {};
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       int ret;
+
+       if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+               return 0;
+
+       ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
+
+       if (!ret)
+               mvmvif->bf_enabled = false;
+
+       return ret;
+}
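For illustration, the debugfs power and beacon-filter structures added in this series carry a bitmask recording which fields were actually written through debugfs; when a firmware command is built, only the masked fields override the values the driver computed. A minimal standalone sketch of that override pattern; the field and flag names are simplified for the example:

#include <stdint.h>
#include <stdio.h>

enum {
	OVR_KEEP_ALIVE      = 1 << 0,
	OVR_RX_DATA_TIMEOUT = 1 << 1,
};

struct dbgfs_pm {
	int mask;			/* which fields below are valid */
	uint8_t keep_alive_seconds;
	uint32_t rx_data_timeout;
};

struct power_cmd {
	uint8_t keep_alive_seconds;
	uint32_t rx_data_timeout;
};

static void apply_overrides(struct power_cmd *cmd, const struct dbgfs_pm *dbg)
{
	if (dbg->mask & OVR_KEEP_ALIVE)
		cmd->keep_alive_seconds = dbg->keep_alive_seconds;
	if (dbg->mask & OVR_RX_DATA_TIMEOUT)
		cmd->rx_data_timeout = dbg->rx_data_timeout;
}

int main(void)
{
	struct power_cmd cmd = { .keep_alive_seconds = 25,
				 .rx_data_timeout = 100000 };
	struct dbgfs_pm dbg = { .mask = OVR_KEEP_ALIVE,
				.keep_alive_seconds = 5 };

	apply_overrides(&cmd, &dbg);	/* only keep_alive is overridden */
	printf("keep_alive=%u rx_timeout=%u\n",
	       (unsigned)cmd.keep_alive_seconds,
	       (unsigned)cmd.rx_data_timeout);	/* prints 5, 100000 */
	return 0;
}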
index a1e3e923ea3e12c1fb6a0adabab88d05b1c5530e..29d49cf0fdb207893091e30542d104e34e829d5f 100644 (file)
@@ -169,27 +169,34 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
                        num_active_bindings++;
        }
 
-       if (!num_active_bindings)
-               goto send_cmd;
-
-       quota = IWL_MVM_MAX_QUOTA / num_active_bindings;
-       quota_rem = IWL_MVM_MAX_QUOTA % num_active_bindings;
+       quota = 0;
+       quota_rem = 0;
+       if (num_active_bindings) {
+               quota = IWL_MVM_MAX_QUOTA / num_active_bindings;
+               quota_rem = IWL_MVM_MAX_QUOTA % num_active_bindings;
+       }
 
        for (idx = 0, i = 0; i < MAX_BINDINGS; i++) {
-               if (data.n_interfaces[i] <= 0)
+               if (data.colors[i] < 0)
                        continue;
 
                cmd.quotas[idx].id_and_color =
                        cpu_to_le32(FW_CMD_ID_AND_COLOR(i, data.colors[i]));
-               cmd.quotas[idx].quota = cpu_to_le32(quota);
-               cmd.quotas[idx].max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA);
+
+               if (data.n_interfaces[i] <= 0) {
+                       cmd.quotas[idx].quota = cpu_to_le32(0);
+                       cmd.quotas[idx].max_duration = cpu_to_le32(0);
+               } else {
+                       cmd.quotas[idx].quota = cpu_to_le32(quota);
+                       cmd.quotas[idx].max_duration =
+                               cpu_to_le32(IWL_MVM_MAX_QUOTA);
+               }
                idx++;
        }
 
        /* Give the remainder of the session to the first binding */
        le32_add_cpu(&cmd.quotas[0].quota, quota_rem);
 
-send_cmd:
        ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, CMD_SYNC,
                                   sizeof(cmd), &cmd);
        if (ret)
index b99fe3163866169c1bfc0c2091a0cae062205354..31587a318f8b1691ddf12508004379d85ffc80c1 100644 (file)
@@ -401,6 +401,17 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm,
 
        load = rs_tl_get_load(lq_data, tid);
 
+       /*
+        * Don't create TX aggregation sessions when in high
+        * BT traffic, as they would just be disrupted by BT.
+        */
+       if (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >= 2) {
+               IWL_DEBUG_COEX(mvm, "BT traffic (%d), no aggregation allowed\n",
+                              BT_MBOX_MSG(&mvm->last_bt_notif,
+                                          3, TRAFFIC_LOAD));
+               return ret;
+       }
+
        if ((iwlwifi_mod_params.auto_agg) || (load > IWL_AGG_LOAD_THRESHOLD)) {
                IWL_DEBUG_HT(mvm, "Starting Tx agg: STA: %pM tid: %d\n",
                             sta->addr, tid);
@@ -1519,6 +1530,29 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
        u8 update_search_tbl_counter = 0;
        int ret;
 
+       switch (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
+       case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
+               /* nothing */
+               break;
+       case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
+               /* avoid antenna B unless MIMO */
+               if (tbl->action == IWL_SISO_SWITCH_ANTENNA2)
+                       tbl->action = IWL_SISO_SWITCH_MIMO2_AB;
+               break;
+       case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
+       case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
+               /* avoid antenna B and MIMO */
+               valid_tx_ant =
+                       first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
+               if (tbl->action != IWL_SISO_SWITCH_ANTENNA1)
+                       tbl->action = IWL_SISO_SWITCH_ANTENNA1;
+               break;
+       default:
+               IWL_ERR(mvm, "Invalid BT load %d",
+                       BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD));
+               break;
+       }
+
        start_action = tbl->action;
        while (1) {
                lq_sta->action_counter++;
@@ -1532,7 +1566,9 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
                             tx_chains_num <= 2))
                                break;
 
-                       if (window->success_ratio >= IWL_RS_GOOD_RATIO)
+                       if (window->success_ratio >= IWL_RS_GOOD_RATIO &&
+                           BT_MBOX_MSG(&mvm->last_bt_notif, 3,
+                                       TRAFFIC_LOAD) == 0)
                                break;
 
                        memcpy(search_tbl, tbl, sz);
@@ -1654,6 +1690,28 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
        u8 update_search_tbl_counter = 0;
        int ret;
 
+       switch (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
+       case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
+               /* nothing */
+               break;
+       case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
+       case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
+               /* avoid antenna B and MIMO */
+               if (tbl->action != IWL_MIMO2_SWITCH_SISO_A)
+                       tbl->action = IWL_MIMO2_SWITCH_SISO_A;
+               break;
+       case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
+               /* avoid antenna B unless MIMO */
+               if (tbl->action == IWL_MIMO2_SWITCH_SISO_B ||
+                   tbl->action == IWL_MIMO2_SWITCH_SISO_C)
+                       tbl->action = IWL_MIMO2_SWITCH_SISO_A;
+               break;
+       default:
+               IWL_ERR(mvm, "Invalid BT load %d",
+                       BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD));
+               break;
+       }
+
        start_action = tbl->action;
        while (1) {
                lq_sta->action_counter++;
@@ -1791,6 +1849,28 @@ static int rs_move_mimo3_to_other(struct iwl_mvm *mvm,
        int ret;
        u8 update_search_tbl_counter = 0;
 
+       switch (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
+       case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
+               /* nothing */
+               break;
+       case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
+       case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
+               /* avoid antenna B and MIMO */
+               if (tbl->action != IWL_MIMO3_SWITCH_SISO_A)
+                       tbl->action = IWL_MIMO3_SWITCH_SISO_A;
+               break;
+       case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
+               /* avoid antenna B unless MIMO */
+               if (tbl->action == IWL_MIMO3_SWITCH_SISO_B ||
+                   tbl->action == IWL_MIMO3_SWITCH_SISO_C)
+                       tbl->action = IWL_MIMO3_SWITCH_SISO_A;
+               break;
+       default:
+               IWL_ERR(mvm, "Invalid BT load %d",
+                       BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD));
+               break;
+       }
+
        start_action = tbl->action;
        while (1) {
                lq_sta->action_counter++;
@@ -2302,6 +2382,32 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
             (current_tpt > (100 * tbl->expected_tpt[low]))))
                scale_action = 0;
 
+       if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >=
+            IWL_BT_COEX_TRAFFIC_LOAD_HIGH) &&
+            (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) {
+               if (lq_sta->last_bt_traffic >
+                   BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
+                       /*
+                        * don't set scale_action, don't want to scale up if
+                        * the rate scale doesn't otherwise think that is a
+                        * good idea.
+                        */
+               } else if (lq_sta->last_bt_traffic <=
+                          BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
+                       scale_action = -1;
+               }
+       }
+       lq_sta->last_bt_traffic =
+               BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD);
+
+       if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >=
+            IWL_BT_COEX_TRAFFIC_LOAD_HIGH) &&
+            (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) {
+               /* search for a new modulation */
+               rs_stay_in_table(lq_sta, true);
+               goto lq_update;
+       }
+
        switch (scale_action) {
        case -1:
                /* Decrease starting rate, update uCode's rate table */
@@ -2783,6 +2889,13 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm,
 
        lq_cmd->agg_time_limit =
                cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+
+       /*
+        * overwrite if needed, pass aggregation time limit
+        * to uCode in uSec. This is racy, but at least it helps.
+        */
+       if (mvm && BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >= 2)
+               lq_cmd->agg_time_limit = cpu_to_le16(1200);
 }
 
 static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
@@ -3081,3 +3194,29 @@ void iwl_mvm_rate_control_unregister(void)
 {
        ieee80211_rate_control_unregister(&rs_mvm_ops);
 }
+
+/**
+ * iwl_mvm_tx_protection - Gets the LQ command, changes it to enable/disable
+ * Tx protection according to this request and previous requests,
+ * and sends the LQ command.
+ * @lq: The LQ command
+ * @mvmsta: The station
+ * @enable: Enable Tx protection?
+ */
+int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq,
+                         struct iwl_mvm_sta *mvmsta, bool enable)
+{
+       lockdep_assert_held(&mvm->mutex);
+
+       if (enable) {
+               if (mvmsta->tx_protection == 0)
+                       lq->flags |= LQ_FLAG_SET_STA_TLC_RTS_MSK;
+               mvmsta->tx_protection++;
+       } else {
+               mvmsta->tx_protection--;
+               if (mvmsta->tx_protection == 0)
+                       lq->flags &= ~LQ_FLAG_SET_STA_TLC_RTS_MSK;
+       }
+
+       return iwl_mvm_send_lq_cmd(mvm, lq, CMD_ASYNC, false);
+}
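For illustration, iwl_mvm_tx_protection() above turns the RTS/CTS protection flag into a per-station reference count: the flag is set when the first user (aggregation, thermal throttling) asks for protection and cleared only when the last one releases it. A minimal standalone sketch of that counting; the structure is simplified for the example:

#include <stdbool.h>
#include <stdio.h>

struct sta {
	int tx_protection;	/* how many users currently want protection */
	bool rts_enabled;	/* what would be sent to the firmware */
};

static void tx_protection(struct sta *sta, bool enable)
{
	if (enable) {
		if (sta->tx_protection == 0)
			sta->rts_enabled = true;
		sta->tx_protection++;
	} else {
		sta->tx_protection--;
		if (sta->tx_protection == 0)
			sta->rts_enabled = false;
	}
}

int main(void)
{
	struct sta sta = { 0 };

	tx_protection(&sta, true);	/* aggregation wants protection */
	tx_protection(&sta, true);	/* thermal throttling wants it too */
	tx_protection(&sta, false);	/* aggregation done, still protected */
	printf("users=%d rts=%d\n", sta.tx_protection, sta.rts_enabled); /* 1 1 */
	return 0;
}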
index 219c6857cc0f3abe71390b58ebae41449a5535f5..cff4f6da77338a4056c585ef9ec2368d4416c6a6 100644 (file)
@@ -358,6 +358,18 @@ struct iwl_lq_sta {
        u8 last_bt_traffic;
 };
 
+enum iwl_bt_coex_profile_traffic_load {
+       IWL_BT_COEX_TRAFFIC_LOAD_NONE           = 0,
+       IWL_BT_COEX_TRAFFIC_LOAD_LOW            = 1,
+       IWL_BT_COEX_TRAFFIC_LOAD_HIGH           = 2,
+       IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS     = 3,
+/*
+ * There are no more values: even though the field below is a u8, the
+ * indication from the BT device only has two bits.
+ */
+};
+
+
 static inline u8 num_of_ant(u8 mask)
 {
        return  !!((mask) & ANT_A) +
@@ -390,4 +402,9 @@ extern int iwl_mvm_rate_control_register(void);
  */
 extern void iwl_mvm_rate_control_unregister(void);
 
+struct iwl_mvm_sta;
+
+int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq,
+                         struct iwl_mvm_sta *mvmsta, bool enable);
+
 #endif /* __rs__ */
index 4dfc21a3e83ed3a1ab127baf1a7864a2fdc4cc9d..e4930d5027d228ff4e411ec2e8c6289e1c4366d2 100644 (file)
@@ -363,3 +363,25 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                                        rxb, &rx_status);
        return 0;
 }
+
+/*
+ * iwl_mvm_rx_statistics - STATISTICS_NOTIFICATION handler
+ *
+ * TODO: This handler is only partially implemented.
+ * It only gets the NIC's temperature.
+ */
+int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
+                         struct iwl_rx_cmd_buffer *rxb,
+                         struct iwl_device_cmd *cmd)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_notif_statistics *stats = (void *)&pkt->data;
+       struct mvm_statistics_general_common *common = &stats->general.common;
+
+       if (mvm->temperature != le32_to_cpu(common->temperature)) {
+               mvm->temperature = le32_to_cpu(common->temperature);
+               iwl_mvm_tt_handler(mvm);
+       }
+
+       return 0;
+}
index 2476e43799d5e6d5bfd182e62b30b3cbe77020a9..2157b0f8ced5cc7c314a246d758a869128e57c59 100644 (file)
@@ -298,12 +298,6 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
        else
                cmd->type = cpu_to_le32(SCAN_TYPE_FORCED);
 
-       /*
-        * TODO: This is a WA due to a bug in the FW AUX framework that does not
-        * properly handle time events that fail to be scheduled
-        */
-       cmd->type = cpu_to_le32(SCAN_TYPE_FORCED);
-
        cmd->repeats = cpu_to_le32(1);
 
        /*
index 5c664ed54400ed65c5232c9092c62dceaeadfdcd..62fe5209093bf7637cb024b2c81dfd301cfd4649 100644 (file)
@@ -64,6 +64,7 @@
 
 #include "mvm.h"
 #include "sta.h"
+#include "rs.h"
 
 static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm)
 {
@@ -217,6 +218,8 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
                                                      mvmvif->color);
        mvm_sta->vif = vif;
        mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+       mvm_sta->tx_protection = 0;
+       mvm_sta->tt_tx_protection = false;
 
        /* HW restart, don't assume the memory has been zeroed */
        atomic_set(&mvm->pending_frames[sta_id], 0);
@@ -226,9 +229,6 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
                if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
                        mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
 
-       if (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE)
-               mvm_sta->tfd_queue_msk |= BIT(vif->cab_queue);
-
        /* for HW restart - need to reset the seq_number etc... */
        memset(mvm_sta->tid_data, 0, sizeof(mvm_sta->tid_data));
 
@@ -798,21 +798,23 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                min(mvmsta->max_agg_bufsize, buf_size);
        mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
 
+       IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
+                    sta->addr, tid);
+
        if (mvm->cfg->ht_params->use_rts_for_aggregation) {
                /*
                 * switch to RTS/CTS if it is the prefer protection
                 * method for HT traffic
+                * this function also sends the LQ command
                 */
-               mvmsta->lq_sta.lq.flags |= LQ_FLAG_SET_STA_TLC_RTS_MSK;
+               return iwl_mvm_tx_protection(mvm, &mvmsta->lq_sta.lq,
+                                            mvmsta, true);
                /*
                 * TODO: remove the TLC_RTS flag when we tear down the last
                 * AGG session (agg_tids_count in DVM)
                 */
        }
 
-       IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
-                    sta->addr, tid);
-
        return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, CMD_ASYNC, false);
 }
 
@@ -1287,17 +1289,11 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
        struct iwl_mvm_add_sta_cmd cmd = {
                .add_modify = STA_MODE_MODIFY,
                .sta_id = mvmsta->sta_id,
-               .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
-               .sleep_state_flags = cpu_to_le16(STA_SLEEP_STATE_AWAKE),
+               .station_flags_msk = cpu_to_le32(STA_FLG_PS),
                .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
        };
        int ret;
 
-       /*
-        * Same modify mask for sleep_tx_count and sleep_state_flags but this
-        * should be fine since if we set the STA as "awake", then
-        * sleep_tx_count is not relevant.
-        */
        ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
        if (ret)
                IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
index a4ddce77aaaefa1eff7599ea275a9663b6cee648..94b265eb32b82bfb7a5f467cef60963f5da8d252 100644 (file)
@@ -250,7 +250,6 @@ enum iwl_mvm_agg_state {
  *     the first packet to be sent in legacy HW queue in Tx AGG stop flow.
  *     Basically when next_reclaimed reaches ssn, we can tell mac80211 that
  *     we are ready to finish the Tx AGG stop / start flow.
- * @wait_for_ba: Expect block-ack before next Tx reply
  */
 struct iwl_mvm_tid_data {
        u16 seq_number;
@@ -260,7 +259,6 @@ struct iwl_mvm_tid_data {
        enum iwl_mvm_agg_state state;
        u16 txq_id;
        u16 ssn;
-       bool wait_for_ba;
 };
 
 /**
@@ -275,6 +273,8 @@ struct iwl_mvm_tid_data {
  * @lock: lock to protect the whole struct. Since %tid_data is access from Tx
  * and from Tx response flow, it needs a spinlock.
  * @tid_data: per tid data. Look at %iwl_mvm_tid_data.
+ * @tx_protection: reference counter for controlling the Tx protection.
+ * @tt_tx_protection: is thermal throttling enable Tx protection?
  *
  * When mac80211 creates a station it reserves some space (hw->sta_data_size)
  * in the structure for use by driver. This structure is placed in that
@@ -296,6 +296,10 @@ struct iwl_mvm_sta {
 #ifdef CONFIG_PM_SLEEP
        u16 last_seq_ctl;
 #endif
+
+       /* Temporary, until the new TLC will control the Tx protection */
+       s8 tx_protection;
+       bool tt_tx_protection;
 };
 
 /**
diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c
new file mode 100644 (file)
index 0000000..a7e3b8d
--- /dev/null
@@ -0,0 +1,512 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include "mvm.h"
+#include "iwl-config.h"
+#include "iwl-io.h"
+#include "iwl-csr.h"
+#include "iwl-prph.h"
+
+#define OTP_DTS_DIODE_DEVIATION 96 /*in words*/
+/* VBG - Voltage Band Gap error data (temperature offset) */
+#define OTP_WP_DTS_VBG                 (OTP_DTS_DIODE_DEVIATION + 2)
+#define MEAS_VBG_MIN_VAL               2300
+#define MEAS_VBG_MAX_VAL               3000
+#define MEAS_VBG_DEFAULT_VAL           2700
+#define DTS_DIODE_VALID(flags)         (flags & DTS_DIODE_REG_FLAGS_PASS_ONCE)
+#define MIN_TEMPERATURE                        0
+#define MAX_TEMPERATURE                        125
+#define TEMPERATURE_ERROR              (MAX_TEMPERATURE + 1)
+#define PTAT_DIGITAL_VALUE_MIN_VALUE   0
+#define PTAT_DIGITAL_VALUE_MAX_VALUE   0xFF
+#define DTS_VREFS_NUM                  5
+static inline u32 DTS_DIODE_GET_VREFS_ID(u32 flags)
+{
+       return (flags & DTS_DIODE_REG_FLAGS_VREFS_ID) >>
+                                       DTS_DIODE_REG_FLAGS_VREFS_ID_POS;
+}
+
+#define CALC_VREFS_MIN_DIFF    43
+#define CALC_VREFS_MAX_DIFF    51
+#define CALC_LUT_SIZE          (1 + CALC_VREFS_MAX_DIFF - CALC_VREFS_MIN_DIFF)
+#define CALC_LUT_INDEX_OFFSET  CALC_VREFS_MIN_DIFF
+#define CALC_TEMPERATURE_RESULT_SHIFT_OFFSET   23
+
+/*
+ * @digital_value: The diode's digital-value sampled (temperature/voltage)
+ * @vref_low: The lower voltage-reference (the vref just below the diode's
+ *     sampled digital-value)
+ * @vref_high: The higher voltage-reference (the vref just above the diode's
+ *     sampled digital-value)
+ * @flags: bits[1:0]: The ID of the Vrefs pair (lowVref,highVref)
+ *     bits[6:2]: Reserved.
+ *     bits[7:7]: Indicates completion of at least 1 successful sample
+ *     since last DTS reset.
+ */
+struct iwl_mvm_dts_diode_bits {
+       u8 digital_value;
+       u8 vref_low;
+       u8 vref_high;
+       u8 flags;
+} __packed;
+
+union dts_diode_results {
+       u32 reg_value;
+       struct iwl_mvm_dts_diode_bits bits;
+} __packed;
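
The packed union above lets the driver read a DTSC averaging register once and then address its four byte fields directly. A small user-space sketch of the same idea (illustration only, not part of the patch; it assumes a little-endian host, since the real layout is defined by the hardware):

#include <stdio.h>
#include <stdint.h>

struct dts_diode_bits {
	uint8_t digital_value;
	uint8_t vref_low;
	uint8_t vref_high;
	uint8_t flags;
} __attribute__((packed));

union dts_diode_results_example {
	uint32_t reg_value;
	struct dts_diode_bits bits;
};

int main(void)
{
	/* example value: on little-endian, digital_value=0x55, vref_low=0x2a,
	 * vref_high=0x31, flags=0x80 (the "pass once" bit, per bits[7:7] above) */
	union dts_diode_results_example r = { .reg_value = 0x80312a55 };

	printf("val=%#x low=%#x high=%#x flags=%#x\n",
	       r.bits.digital_value, r.bits.vref_low,
	       r.bits.vref_high, r.bits.flags);
	return 0;
}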
+
+static s16 iwl_mvm_dts_get_volt_band_gap(struct iwl_mvm *mvm)
+{
+       struct iwl_nvm_section calib_sec;
+       const __le16 *calib;
+       u16 vbg;
+
+       /* TODO: move parsing to NVM code */
+       calib_sec = mvm->nvm_sections[NVM_SECTION_TYPE_CALIBRATION];
+       calib = (__le16 *)calib_sec.data;
+
+       vbg = le16_to_cpu(calib[OTP_WP_DTS_VBG]);
+
+       if (vbg < MEAS_VBG_MIN_VAL || vbg > MEAS_VBG_MAX_VAL)
+               vbg = MEAS_VBG_DEFAULT_VAL;
+
+       return vbg;
+}
+
+static u16 iwl_mvm_dts_get_ptat_deviation_offset(struct iwl_mvm *mvm)
+{
+       const u8 *calib;
+       u8 ptat, pa1, pa2, median;
+
+       /* TODO: move parsing to NVM code */
+       calib = mvm->nvm_sections[NVM_SECTION_TYPE_CALIBRATION].data;
+       ptat = calib[OTP_DTS_DIODE_DEVIATION];
+       pa1 = calib[OTP_DTS_DIODE_DEVIATION + 1];
+       pa2 = calib[OTP_DTS_DIODE_DEVIATION + 2];
+
+       /* get the median: */
+       if (ptat > pa1) {
+               if (ptat > pa2)
+                       median = (pa1 > pa2) ? pa1 : pa2;
+               else
+                       median = ptat;
+       } else {
+               if (pa1 > pa2)
+                       median = (ptat > pa2) ? ptat : pa2;
+               else
+                       median = pa1;
+       }
+
+       return ptat - median;
+}
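
The nested ifs above pick the median of the three calibration bytes (ptat, pa1, pa2) and return how far ptat deviates from it. A quick stand-alone check of that selection logic, with made-up byte values (illustration only):

#include <stdio.h>

static unsigned char median3(unsigned char ptat, unsigned char pa1,
			     unsigned char pa2)
{
	unsigned char median;

	/* same branch structure as iwl_mvm_dts_get_ptat_deviation_offset() */
	if (ptat > pa1) {
		if (ptat > pa2)
			median = (pa1 > pa2) ? pa1 : pa2;
		else
			median = ptat;
	} else {
		if (pa1 > pa2)
			median = (ptat > pa2) ? ptat : pa2;
		else
			median = pa1;
	}
	return median;
}

int main(void)
{
	/* ptat=120, pa1=90, pa2=110 -> median 110, deviation offset 10 */
	unsigned char ptat = 120, pa1 = 90, pa2 = 110;

	printf("median=%u offset=%d\n", median3(ptat, pa1, pa2),
	       ptat - median3(ptat, pa1, pa2));
	return 0;
}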
+
+static u8 iwl_mvm_dts_calibrate_ptat_deviation(struct iwl_mvm *mvm, u8 value)
+{
+       /* Calibrate the PTAT digital value, based on PTAT deviation data: */
+       s16 new_val = value - iwl_mvm_dts_get_ptat_deviation_offset(mvm);
+
+       if (new_val > PTAT_DIGITAL_VALUE_MAX_VALUE)
+               new_val = PTAT_DIGITAL_VALUE_MAX_VALUE;
+       else if (new_val < PTAT_DIGITAL_VALUE_MIN_VALUE)
+               new_val = PTAT_DIGITAL_VALUE_MIN_VALUE;
+
+       return new_val;
+}
+
+static bool dts_get_adjacent_vrefs(struct iwl_mvm *mvm,
+                                  union dts_diode_results *avg_ptat)
+{
+       u8 vrefs_results[DTS_VREFS_NUM];
+       u8 low_vref_index = 0, flags;
+       u32 reg;
+
+       reg = iwl_read_prph(mvm->trans, DTSC_VREF_AVG);
+       memcpy(vrefs_results, &reg, sizeof(reg));
+       reg = iwl_read_prph(mvm->trans, DTSC_VREF5_AVG);
+       vrefs_results[4] = reg & 0xff;
+
+       if (avg_ptat->bits.digital_value < vrefs_results[0] ||
+           avg_ptat->bits.digital_value > vrefs_results[4])
+               return false;
+
+       if (avg_ptat->bits.digital_value > vrefs_results[3])
+               low_vref_index = 3;
+       else if (avg_ptat->bits.digital_value > vrefs_results[2])
+               low_vref_index = 2;
+       else if (avg_ptat->bits.digital_value > vrefs_results[1])
+               low_vref_index = 1;
+
+       avg_ptat->bits.vref_low  = vrefs_results[low_vref_index];
+       avg_ptat->bits.vref_high = vrefs_results[low_vref_index + 1];
+       flags = avg_ptat->bits.flags;
+       avg_ptat->bits.flags =
+               (flags & ~DTS_DIODE_REG_FLAGS_VREFS_ID) |
+               (low_vref_index & DTS_DIODE_REG_FLAGS_VREFS_ID);
+       return true;
+}
+
+/*
+ * return true if the results are valid, and false otherwise.
+ */
+static bool dts_read_ptat_avg_results(struct iwl_mvm *mvm,
+                                     union dts_diode_results *avg_ptat)
+{
+       u32 reg;
+       u8 tmp;
+
+       /* fill the diode value and pass_once with avg-reg results */
+       reg = iwl_read_prph(mvm->trans, DTSC_PTAT_AVG);
+       reg &= DTS_DIODE_REG_DIG_VAL | DTS_DIODE_REG_PASS_ONCE;
+       avg_ptat->reg_value = reg;
+
+       /* calibrate the PTAT digital value */
+       tmp = avg_ptat->bits.digital_value;
+       tmp = iwl_mvm_dts_calibrate_ptat_deviation(mvm, tmp);
+       avg_ptat->bits.digital_value = tmp;
+
+       /*
+        * fill vrefs fields, based on the avgVrefs results
+        * and the diode value
+        */
+       return dts_get_adjacent_vrefs(mvm, avg_ptat) &&
+               DTS_DIODE_VALID(avg_ptat->bits.flags);
+}
+
+static s32 calculate_nic_temperature(union dts_diode_results avg_ptat,
+                                    u16 volt_band_gap)
+{
+       u32 tmp_result;
+       u8 vrefs_diff;
+       /*
+        * For temperature calculation (at the end, shift right by 23)
+        * LUT[(D2-D1)] = ROUND{ 2^23 / ((D2-D1)*9*10) }
+        * (D2-D1) ==   43    44    45    46    47    48    49    50    51
+        */
+       static const u16 calc_lut[CALC_LUT_SIZE] = {
+               2168, 2118, 2071, 2026, 1983, 1942, 1902, 1864, 1828,
+       };
+
+       /*
+        * The difference between the high and low voltage references is
+        * assumed to lie strictly within the
+        * [CALC_VREFS_MIN_DIFF, CALC_VREFS_MAX_DIFF] range checked below
+        */
+       vrefs_diff = avg_ptat.bits.vref_high - avg_ptat.bits.vref_low;
+
+       if (vrefs_diff < CALC_VREFS_MIN_DIFF ||
+           vrefs_diff > CALC_VREFS_MAX_DIFF)
+               return TEMPERATURE_ERROR;
+
+       /* calculate the result: */
+       tmp_result =
+               vrefs_diff * (DTS_DIODE_GET_VREFS_ID(avg_ptat.bits.flags) + 9);
+       tmp_result += avg_ptat.bits.digital_value;
+       tmp_result -= avg_ptat.bits.vref_high;
+
+       /* multiply by the LUT value (based on the diff) */
+       tmp_result *= calc_lut[vrefs_diff - CALC_LUT_INDEX_OFFSET];
+
+       /*
+        * Get the BandGap (the voltage references source) error data
+        * (temperature offset)
+        */
+       tmp_result *= volt_band_gap;
+
+       /*
+        * At this point tmp_result can occupy up to 32 bits, and we want to
+        * right-shift it *without* sign extension.
+        */
+       tmp_result = tmp_result >> CALC_TEMPERATURE_RESULT_SHIFT_OFFSET;
+
+       /*
+        * at this point, tmp_result should be in the range:
+        * 200 <= tmp_result <= 365
+        */
+       return (s16)tmp_result - 240;
+}
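
The look-up table above follows the formula stated in its comment, LUT[(D2-D1)] = ROUND{2^23 / ((D2-D1)*9*10)}. The snippet below (not part of the patch) recomputes the nine entries and compares them against calc_lut[]:

#include <stdio.h>
#include <math.h>

int main(void)
{
	static const unsigned short calc_lut[] = {
		2168, 2118, 2071, 2026, 1983, 1942, 1902, 1864, 1828,
	};
	int d;

	for (d = 43; d <= 51; d++) {
		/* ROUND{ 2^23 / ((D2-D1) * 9 * 10) } */
		unsigned short v = (unsigned short)lround(8388608.0 / (d * 90));

		printf("D2-D1=%d computed=%u table=%u\n",
		       d, v, calc_lut[d - 43]);
	}
	return 0;
}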
+
+static s32 check_nic_temperature(struct iwl_mvm *mvm)
+{
+       u16 volt_band_gap;
+       union dts_diode_results avg_ptat;
+
+       volt_band_gap = iwl_mvm_dts_get_volt_band_gap(mvm);
+
+       /* disable DTS */
+       iwl_write_prph(mvm->trans, SHR_MISC_WFM_DTS_EN, 0);
+
+       /* SV initialization */
+       iwl_write_prph(mvm->trans, SHR_MISC_WFM_DTS_EN, 1);
+       iwl_write_prph(mvm->trans, DTSC_CFG_MODE,
+                      DTSC_CFG_MODE_PERIODIC);
+
+       /* wait for results */
+       msleep(100);
+       if (!dts_read_ptat_avg_results(mvm, &avg_ptat))
+               return TEMPERATURE_ERROR;
+
+       /* disable DTS */
+       iwl_write_prph(mvm->trans, SHR_MISC_WFM_DTS_EN, 0);
+
+       return calculate_nic_temperature(avg_ptat, volt_band_gap);
+}
+
+static void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm)
+{
+       u32 duration = mvm->thermal_throttle.params->ct_kill_duration;
+
+       IWL_ERR(mvm, "Enter CT Kill\n");
+       iwl_mvm_set_hw_ctkill_state(mvm, true);
+       schedule_delayed_work(&mvm->thermal_throttle.ct_kill_exit,
+                             round_jiffies_relative(duration * HZ));
+}
+
+static void iwl_mvm_exit_ctkill(struct iwl_mvm *mvm)
+{
+       IWL_ERR(mvm, "Exit CT Kill\n");
+       iwl_mvm_set_hw_ctkill_state(mvm, false);
+}
+
+static void check_exit_ctkill(struct work_struct *work)
+{
+       struct iwl_mvm_tt_mgmt *tt;
+       struct iwl_mvm *mvm;
+       u32 duration;
+       s32 temp;
+
+       tt = container_of(work, struct iwl_mvm_tt_mgmt, ct_kill_exit.work);
+       mvm = container_of(tt, struct iwl_mvm, thermal_throttle);
+
+       duration = tt->params->ct_kill_duration;
+
+       iwl_trans_start_hw(mvm->trans);
+       temp = check_nic_temperature(mvm);
+       iwl_trans_stop_hw(mvm->trans, false);
+
+       if (temp < MIN_TEMPERATURE || temp > MAX_TEMPERATURE) {
+               IWL_DEBUG_TEMP(mvm, "Failed to measure NIC temperature\n");
+               goto reschedule;
+       }
+       IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", temp);
+
+       if (temp <= tt->params->ct_kill_exit) {
+               iwl_mvm_exit_ctkill(mvm);
+               return;
+       }
+
+reschedule:
+       schedule_delayed_work(&mvm->thermal_throttle.ct_kill_exit,
+                             round_jiffies(duration * HZ));
+}
+
+static void iwl_mvm_tt_smps_iterator(void *_data, u8 *mac,
+                                    struct ieee80211_vif *vif)
+{
+       struct iwl_mvm *mvm = _data;
+       enum ieee80211_smps_mode smps_mode;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       if (mvm->thermal_throttle.dynamic_smps)
+               smps_mode = IEEE80211_SMPS_DYNAMIC;
+       else
+               smps_mode = IEEE80211_SMPS_AUTOMATIC;
+
+       if (vif->type != NL80211_IFTYPE_STATION)
+               return;
+
+       iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT, smps_mode);
+}
+
+static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable)
+{
+       struct ieee80211_sta *sta;
+       struct iwl_mvm_sta *mvmsta;
+       int i, err;
+
+       for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
+               sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
+                                               lockdep_is_held(&mvm->mutex));
+               if (IS_ERR_OR_NULL(sta))
+                       continue;
+               mvmsta = (void *)sta->drv_priv;
+               if (enable == mvmsta->tt_tx_protection)
+                       continue;
+               err = iwl_mvm_tx_protection(mvm, &mvmsta->lq_sta.lq,
+                                           mvmsta, enable);
+               if (err) {
+                       IWL_ERR(mvm, "Failed to %s Tx protection\n",
+                               enable ? "enable" : "disable");
+               } else {
+                       IWL_DEBUG_TEMP(mvm, "%s Tx protection\n",
+                                      enable ? "Enable" : "Disable");
+                       mvmsta->tt_tx_protection = enable;
+               }
+       }
+}
+
+static void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff)
+{
+       struct iwl_host_cmd cmd = {
+               .id = REPLY_THERMAL_MNG_BACKOFF,
+               .len = { sizeof(u32), },
+               .data = { &backoff, },
+               .flags = CMD_SYNC,
+       };
+
+       if (iwl_mvm_send_cmd(mvm, &cmd) == 0) {
+               IWL_DEBUG_TEMP(mvm, "Set Thermal Tx backoff to: %u\n",
+                              backoff);
+               mvm->thermal_throttle.tx_backoff = backoff;
+       } else {
+               IWL_ERR(mvm, "Failed to change Thermal Tx backoff\n");
+       }
+}
+
+void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
+{
+       const struct iwl_tt_params *params = mvm->thermal_throttle.params;
+       struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
+       s32 temperature = mvm->temperature;
+       int i;
+       u32 tx_backoff;
+
+       IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", mvm->temperature);
+
+       if (params->support_ct_kill && temperature >= params->ct_kill_entry) {
+               iwl_mvm_enter_ctkill(mvm);
+               return;
+       }
+
+       if (params->support_dynamic_smps) {
+               if (!tt->dynamic_smps &&
+                   temperature >= params->dynamic_smps_entry) {
+                       IWL_DEBUG_TEMP(mvm, "Enable dynamic SMPS\n");
+                       tt->dynamic_smps = true;
+                       ieee80211_iterate_active_interfaces_atomic(
+                                       mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                                       iwl_mvm_tt_smps_iterator, mvm);
+               } else if (tt->dynamic_smps &&
+                          temperature <= params->dynamic_smps_exit) {
+                       IWL_DEBUG_TEMP(mvm, "Disable dynamic SMPS\n");
+                       tt->dynamic_smps = false;
+                       ieee80211_iterate_active_interfaces_atomic(
+                                       mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                                       iwl_mvm_tt_smps_iterator, mvm);
+               }
+       }
+
+       if (params->support_tx_protection) {
+               if (temperature >= params->tx_protection_entry)
+                       iwl_mvm_tt_tx_protection(mvm, true);
+               else if (temperature <= params->tx_protection_exit)
+                       iwl_mvm_tt_tx_protection(mvm, false);
+       }
+
+       if (params->support_tx_backoff) {
+               tx_backoff = 0;
+               for (i = 0; i < TT_TX_BACKOFF_SIZE; i++) {
+                       if (temperature < params->tx_backoff[i].temperature)
+                               break;
+                       tx_backoff = params->tx_backoff[i].backoff;
+               }
+               if (tt->tx_backoff != tx_backoff)
+                       iwl_mvm_tt_tx_backoff(mvm, tx_backoff);
+       }
+}
+
+static const struct iwl_tt_params iwl7000_tt_params = {
+       .ct_kill_entry = 118,
+       .ct_kill_exit = 96,
+       .ct_kill_duration = 5,
+       .dynamic_smps_entry = 114,
+       .dynamic_smps_exit = 110,
+       .tx_protection_entry = 114,
+       .tx_protection_exit = 108,
+       .tx_backoff = {
+               {.temperature = 112, .backoff = 200},
+               {.temperature = 113, .backoff = 600},
+               {.temperature = 114, .backoff = 1200},
+               {.temperature = 115, .backoff = 2000},
+               {.temperature = 116, .backoff = 4000},
+               {.temperature = 117, .backoff = 10000},
+       },
+       .support_ct_kill = true,
+       .support_dynamic_smps = true,
+       .support_tx_protection = true,
+       .support_tx_backoff = true,
+};
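
Taken together, iwl_mvm_tt_handler() above walks this ordered tx_backoff table and keeps the backoff of the last entry whose threshold the measured temperature has reached, so the backoff is 0 below the first threshold (112). A stand-alone sketch of that lookup, reusing the same values (illustration only):

#include <stdio.h>

struct tt_tx_backoff { int temperature; unsigned int backoff; };

int main(void)
{
	/* same entries as iwl7000_tt_params.tx_backoff above */
	static const struct tt_tx_backoff tbl[] = {
		{112, 200}, {113, 600}, {114, 1200},
		{115, 2000}, {116, 4000}, {117, 10000},
	};
	int temps[] = { 100, 112, 115, 120 };
	unsigned int i, j;

	for (j = 0; j < sizeof(temps) / sizeof(temps[0]); j++) {
		unsigned int tx_backoff = 0;

		/* keep the last entry whose threshold was reached */
		for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++) {
			if (temps[j] < tbl[i].temperature)
				break;
			tx_backoff = tbl[i].backoff;
		}
		printf("temp=%d -> backoff=%u\n", temps[j], tx_backoff);
	}
	return 0;
}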
+
+void iwl_mvm_tt_initialize(struct iwl_mvm *mvm)
+{
+       struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
+
+       IWL_DEBUG_TEMP(mvm, "Initialize Thermal Throttling\n");
+       tt->params = &iwl7000_tt_params;
+       INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill);
+}
+
+void iwl_mvm_tt_exit(struct iwl_mvm *mvm)
+{
+       cancel_delayed_work_sync(&mvm->thermal_throttle.ct_kill_exit);
+       IWL_DEBUG_TEMP(mvm, "Exit Thermal Throttling\n");
+}
index 48c1891e3df6b00ce3417b7609f9c240ae36f2e4..f0e96a927407d3ae1473481f5e7d3eeb894a5a2b 100644 (file)
@@ -175,7 +175,7 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
         * table is controlled by LINK_QUALITY commands
         */
 
-       if (ieee80211_is_data(fc)) {
+       if (ieee80211_is_data(fc) && sta) {
                tx_cmd->initial_rate_index = 0;
                tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
                return;
@@ -408,7 +408,6 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
        IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
                     tid, txq_id, seq_number);
 
-       /* NOTE: aggregation will need changes here (for txq id) */
        if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
                goto drop_unlock_sta;
 
@@ -610,8 +609,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
                    !(info->flags & IEEE80211_TX_STAT_ACK))
                        info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
 
-               /* W/A FW bug: seq_ctl is wrong when the queue is flushed */
-               if (status == TX_STATUS_FAIL_FIFO_FLUSHED) {
+               /* W/A FW bug: seq_ctl is wrong when the status isn't success */
+               if (status != TX_STATUS_SUCCESS) {
                        struct ieee80211_hdr *hdr = (void *)skb->data;
                        seq_ctl = le16_to_cpu(hdr->seq_ctrl);
                }
index 687b34e387ac325b0b5b5a3cf869625d31341ad4..1e1332839e4a745b2a57f9d837b112fff52d75bd 100644 (file)
@@ -76,6 +76,11 @@ int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
 {
        int ret;
 
+#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
+       if (WARN_ON(mvm->d3_test_active))
+               return -EIO;
+#endif
+
        /*
         * Synchronous commands from this op-mode must hold
         * the mutex, this ensures we don't try to send two
@@ -125,6 +130,11 @@ int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
 
        lockdep_assert_held(&mvm->mutex);
 
+#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
+       if (WARN_ON(mvm->d3_test_active))
+               return -EIO;
+#endif
+
        /*
         * Only synchronous commands can wait for status,
         * we use WANT_SKB so the caller can't.
@@ -471,3 +481,34 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq,
 
        return iwl_mvm_send_cmd(mvm, &cmd);
 }
+
+/**
+ * iwl_mvm_update_smps - Register a request to change the SMPS mode
+ * @req_type: The part of the driver that requests the change.
+ * @smps_request: The requested SMPS mode.
+ *
+ * Record the request and update the SMPS mode according to all other
+ * requests pending in the driver.
+ */
+void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                        enum iwl_mvm_smps_type_request req_type,
+                        enum ieee80211_smps_mode smps_request)
+{
+       struct iwl_mvm_vif *mvmvif;
+       enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
+       int i;
+
+       lockdep_assert_held(&mvm->mutex);
+       mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       mvmvif->smps_requests[req_type] = smps_request;
+       for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
+               if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC) {
+                       smps_mode = IEEE80211_SMPS_STATIC;
+                       break;
+               }
+               if (mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
+                       smps_mode = IEEE80211_SMPS_DYNAMIC;
+       }
+
+       ieee80211_request_smps(vif, smps_mode);
+}
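
The loop above gives SMPS requests a simple precedence: any STATIC request wins, otherwise any DYNAMIC request wins, otherwise the mode stays AUTOMATIC. A minimal sketch of that combination rule, with locally defined enum values standing in for the ieee80211_smps_mode constants (illustration only):

#include <stdio.h>

enum smps_mode { SMPS_AUTOMATIC, SMPS_DYNAMIC, SMPS_STATIC };

/* same precedence as the loop in iwl_mvm_update_smps() */
static enum smps_mode combine_smps(const enum smps_mode *req, int n)
{
	enum smps_mode mode = SMPS_AUTOMATIC;
	int i;

	for (i = 0; i < n; i++) {
		if (req[i] == SMPS_STATIC)
			return SMPS_STATIC;
		if (req[i] == SMPS_DYNAMIC)
			mode = SMPS_DYNAMIC;
	}
	return mode;
}

int main(void)
{
	enum smps_mode req[] = { SMPS_AUTOMATIC, SMPS_DYNAMIC, SMPS_AUTOMATIC };

	/* prints 1 (SMPS_DYNAMIC): one DYNAMIC requester is enough */
	printf("combined mode = %d\n", combine_smps(req, 3));
	return 0;
}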
index 8cb53ec2b77bc6534faf95efaf64377e80ceb798..81f3ea5b09a4b7f26d0f4bb7877e50dedea6cd38 100644 (file)
@@ -78,6 +78,7 @@
 
 /* Hardware specific file defines the PCI IDs table for that hardware module */
 static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
+#if IS_ENABLED(CONFIG_IWLDVM)
        {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */
        {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */
        {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */
@@ -253,13 +254,60 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
        {IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)},
        {IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)},
        {IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)},
+#endif /* CONFIG_IWLDVM */
 
+#if IS_ENABLED(CONFIG_IWLMVM)
 /* 7000 Series */
        {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_n_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x4162, iwl7260_n_cfg)},
+       {IWL_PCI_DEVICE(0x08B2, 0x4270, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B2, 0x4260, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B2, 0x4262, iwl7260_n_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x4470, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x4460, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_ac_cfg)},
-       {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xC170, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xC060, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xC160, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xC062, iwl7260_n_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xC162, iwl7260_n_cfg)},
+       {IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B2, 0xC262, iwl7260_n_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xC470, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xC460, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xC462, iwl7260_n_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xC020, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B2, 0xC220, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xC420, iwl7260_2n_cfg)},
+
+/* 3160 Series */
+       {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B3, 0x0170, iwl3160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B3, 0x0060, iwl3160_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B3, 0x0062, iwl3160_n_cfg)},
+       {IWL_PCI_DEVICE(0x08B4, 0x0270, iwl3160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B3, 0x0470, iwl3160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B3, 0x8170, iwl3160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)},
+       {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)},
+#endif /* CONFIG_IWLMVM */
 
        {0}
 };
index 148843e7f34f26f9ea98d8ae269e366765cba115..b654dcdd048a599232b3488fefcf852e4218f004 100644 (file)
@@ -217,6 +217,7 @@ struct iwl_pcie_txq_scratch_buf {
  * @trans_pcie: pointer back to transport (for timer)
  * @need_update: indicates need to update read/write index
  * @active: stores if queue is active
+ * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
  *
  * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
  * descriptors) and required locking structures.
@@ -232,6 +233,7 @@ struct iwl_txq {
        struct iwl_trans_pcie *trans_pcie;
        u8 need_update;
        u8 active;
+       bool ampdu;
 };
 
 static inline dma_addr_t
index 567e67ad1f617682cbba7a90f2ca95e53eb07eea..3688dc5ba1aca0fb8f1bc4b90af285ff623fa553 100644 (file)
@@ -802,9 +802,6 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
        u32 handled = 0;
        unsigned long flags;
        u32 i;
-#ifdef CONFIG_IWLWIFI_DEBUG
-       u32 inta_mask;
-#endif
 
        lock_map_acquire(&trans->sync_cmd_lockdep_map);
 
@@ -826,14 +823,9 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
 
        inta = trans_pcie->inta;
 
-#ifdef CONFIG_IWLWIFI_DEBUG
-       if (iwl_have_debug_level(IWL_DL_ISR)) {
-               /* just for debug */
-               inta_mask = iwl_read32(trans, CSR_INT_MASK);
+       if (iwl_have_debug_level(IWL_DL_ISR))
                IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
-                             inta, inta_mask);
-       }
-#endif
+                             inta, iwl_read32(trans, CSR_INT_MASK));
 
        /* saved interrupt in inta variable now we can reset trans_pcie->inta */
        trans_pcie->inta = 0;
@@ -855,12 +847,11 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
                goto out;
        }
 
-#ifdef CONFIG_IWLWIFI_DEBUG
        if (iwl_have_debug_level(IWL_DL_ISR)) {
                /* NIC fires this, but we don't use it, redundant with WAKEUP */
                if (inta & CSR_INT_BIT_SCD) {
-                       IWL_DEBUG_ISR(trans, "Scheduler finished to transmit "
-                                     "the frame/frames.\n");
+                       IWL_DEBUG_ISR(trans,
+                                     "Scheduler finished to transmit the frame/frames.\n");
                        isr_stats->sch++;
                }
 
@@ -870,7 +861,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
                        isr_stats->alive++;
                }
        }
-#endif
+
        /* Safely ignore these bits for debug checks below */
        inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
 
@@ -1118,9 +1109,6 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data)
        struct iwl_trans *trans = data;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u32 inta, inta_mask;
-#ifdef CONFIG_IWLWIFI_DEBUG
-       u32 inta_fh;
-#endif
 
        lockdep_assert_held(&trans_pcie->irq_lock);
 
@@ -1159,13 +1147,11 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data)
                return IRQ_HANDLED;
        }
 
-#ifdef CONFIG_IWLWIFI_DEBUG
-       if (iwl_have_debug_level(IWL_DL_ISR)) {
-               inta_fh = iwl_read32(trans, CSR_FH_INT_STATUS);
-               IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x, "
-                             "fh 0x%08x\n", inta, inta_mask, inta_fh);
-       }
-#endif
+       if (iwl_have_debug_level(IWL_DL_ISR))
+               IWL_DEBUG_ISR(trans,
+                             "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
+                             inta, inta_mask,
+                             iwl_read32(trans, CSR_FH_INT_STATUS));
 
        trans_pcie->inta |= inta;
        /* the thread will service interrupts and re-enable them */
@@ -1198,7 +1184,7 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
 {
        struct iwl_trans *trans = data;
        struct iwl_trans_pcie *trans_pcie;
-       u32 inta, inta_mask;
+       u32 inta;
        u32 val = 0;
        u32 read;
        unsigned long flags;
@@ -1226,7 +1212,6 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
         * If we have something to service, the tasklet will re-enable ints.
         * If we *don't* have something, we'll re-enable before leaving here.
         */
-       inta_mask = iwl_read32(trans, CSR_INT_MASK);
        iwl_write32(trans, CSR_INT_MASK, 0x00000000);
 
        /* Ignore interrupt if there's nothing in NIC to service.
@@ -1271,8 +1256,11 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
                val |= 0x8000;
 
        inta = (0xff & val) | ((0xff00 & val) << 16);
-       IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
-                     inta, inta_mask, val);
+       IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled(sw) 0x%08x ict 0x%08x\n",
+                     inta, trans_pcie->inta_mask, val);
+       if (iwl_have_debug_level(IWL_DL_ISR))
+               IWL_DEBUG_ISR(trans, "enabled(hw) 0x%08x\n",
+                             iwl_read32(trans, CSR_INT_MASK));
 
        inta &= trans_pcie->inta_mask;
        trans_pcie->inta |= inta;
index 50ba0a468f94baa59dede7d4c50c8a3a2f960b35..197dbe0a868c5c1a97e8a7c386c5a2ce009038e7 100644 (file)
@@ -405,20 +405,27 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
 {
        u8 *v_addr;
        dma_addr_t p_addr;
-       u32 offset;
+       u32 offset, chunk_sz = section->len;
        int ret = 0;
 
        IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
                     section_num);
 
-       v_addr = dma_alloc_coherent(trans->dev, PAGE_SIZE, &p_addr, GFP_KERNEL);
-       if (!v_addr)
-               return -ENOMEM;
+       v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
+                                   GFP_KERNEL | __GFP_NOWARN);
+       if (!v_addr) {
+               IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
+               chunk_sz = PAGE_SIZE;
+               v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
+                                           &p_addr, GFP_KERNEL);
+               if (!v_addr)
+                       return -ENOMEM;
+       }
 
-       for (offset = 0; offset < section->len; offset += PAGE_SIZE) {
+       for (offset = 0; offset < section->len; offset += chunk_sz) {
                u32 copy_size;
 
-               copy_size = min_t(u32, PAGE_SIZE, section->len - offset);
+               copy_size = min_t(u32, chunk_sz, section->len - offset);
 
                memcpy(v_addr, (u8 *)section->data + offset, copy_size);
                ret = iwl_pcie_load_firmware_chunk(trans,
@@ -432,7 +439,7 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
                }
        }
 
-       dma_free_coherent(trans->dev, PAGE_SIZE, v_addr, p_addr);
+       dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
        return ret;
 }
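
The hunk above reworks iwl_pcie_load_section() to first try a single coherent buffer covering the whole section (allocated quietly with __GFP_NOWARN) and to fall back to PAGE_SIZE chunks only if that fails; the copy loop then steps by whichever chunk size was obtained. A rough user-space sketch of the same allocate-big-or-fall-back shape (illustration only, not driver code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SZ 4096

/* Copy "section" data into a scratch buffer chunk by chunk, preferring one
 * big buffer and falling back to page-sized chunks, mirroring the reworked
 * iwl_pcie_load_section() flow. */
static int load_section(const unsigned char *data, size_t len)
{
	size_t chunk_sz = len, offset;
	unsigned char *buf = malloc(chunk_sz);

	if (!buf) {
		chunk_sz = PAGE_SZ;
		buf = malloc(chunk_sz);
		if (!buf)
			return -1;
	}

	for (offset = 0; offset < len; offset += chunk_sz) {
		size_t copy = len - offset < chunk_sz ? len - offset : chunk_sz;

		memcpy(buf, data + offset, copy);
		/* ... hand (buf, copy) to the device here ... */
	}

	free(buf);
	return 0;
}

int main(void)
{
	unsigned char blob[10000] = { 0 };

	return load_section(blob, sizeof(blob));
}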
 
@@ -571,13 +578,17 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
        clear_bit(STATUS_RFKILL, &trans_pcie->status);
 }
 
-static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans)
+static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
 {
-       /* let the ucode operate on its own */
-       iwl_write32(trans, CSR_UCODE_DRV_GP1_SET,
-                   CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
-
        iwl_disable_interrupts(trans);
+
+       /*
+        * in testing mode, the host stays awake and the
+        * hardware won't be reset (not even partially)
+        */
+       if (test)
+               return;
+
        iwl_pcie_disable_ict(trans);
 
        iwl_clear_bit(trans, CSR_GP_CNTRL,
@@ -596,11 +607,18 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans)
 }
 
 static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
-                                   enum iwl_d3_status *status)
+                                   enum iwl_d3_status *status,
+                                   bool test)
 {
        u32 val;
        int ret;
 
+       if (test) {
+               iwl_enable_interrupts(trans);
+               *status = IWL_D3_STATUS_ALIVE;
+               return 0;
+       }
+
        iwl_pcie_set_pwr(trans, false);
 
        val = iwl_read32(trans, CSR_RESET);
@@ -636,9 +654,6 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
                return ret;
        }
 
-       iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
-                   CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
-
        *status = IWL_D3_STATUS_ALIVE;
        return 0;
 }
@@ -917,11 +932,11 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
 }
 
 static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
-                                   void *buf, int dwords)
+                                   const void *buf, int dwords)
 {
        unsigned long flags;
        int offs, ret = 0;
-       u32 *vals = buf;
+       const u32 *vals = buf;
 
        if (iwl_trans_grab_nic_access(trans, false, &flags)) {
                iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
index c5e30294c5acc746730d2b60c857addeba4f3a36..c47c92165aba91d2e97631f1953bd4e480cabad7 100644 (file)
@@ -224,13 +224,13 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
 
        switch (sec_ctl & TX_CMD_SEC_MSK) {
        case TX_CMD_SEC_CCM:
-               len += CCMP_MIC_LEN;
+               len += IEEE80211_CCMP_MIC_LEN;
                break;
        case TX_CMD_SEC_TKIP:
-               len += TKIP_ICV_LEN;
+               len += IEEE80211_TKIP_ICV_LEN;
                break;
        case TX_CMD_SEC_WEP:
-               len += WEP_IV_LEN + WEP_ICV_LEN;
+               len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
                break;
        }
 
@@ -576,10 +576,16 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
 
        spin_lock_bh(&txq->lock);
        while (q->write_ptr != q->read_ptr) {
+               IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
+                                  txq_id, q->read_ptr);
                iwl_pcie_txq_free_tfd(trans, txq);
                q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
        }
+       txq->active = false;
        spin_unlock_bh(&txq->lock);
+
+       /* just in case - this queue may have been stopped */
+       iwl_wake_queue(trans, txq);
 }
 
 /*
@@ -927,6 +933,12 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 
        spin_lock_bh(&txq->lock);
 
+       if (!txq->active) {
+               IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
+                                   txq_id, ssn);
+               goto out;
+       }
+
        if (txq->q.read_ptr == tfd_num)
                goto out;
 
@@ -1045,6 +1057,10 @@ static inline void iwl_pcie_txq_set_inactive(struct iwl_trans *trans,
                (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
 }
 
+/* Receiver address (actually, Rx station's index into station table),
+ * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
+#define BUILD_RAxTID(sta_id, tid)      (((sta_id) << 4) + (tid))
+
 void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
                               int sta_id, int tid, int frame_limit, u16 ssn)
 {
@@ -1069,6 +1085,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
 
                /* enable aggregations for the queue */
                iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
+               trans_pcie->txq[txq_id].ampdu = true;
        } else {
                /*
                 * disable aggregations for the queue, this will also make the
@@ -1103,6 +1120,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
                       (fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
                       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
                       SCD_QUEUE_STTS_REG_MSK);
+       trans_pcie->txq[txq_id].active = true;
        IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n",
                            txq_id, fifo, ssn & 0xff);
 }
@@ -1125,6 +1143,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
                            ARRAY_SIZE(zero_val));
 
        iwl_pcie_txq_unmap(trans, txq_id);
+       trans_pcie->txq[txq_id].ampdu = false;
 
        IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
 }
@@ -1518,11 +1537,13 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
        if (test_bit(STATUS_FW_ERROR, &trans_pcie->status)) {
                IWL_ERR(trans, "FW error in SYNC CMD %s\n",
                        get_cmd_string(trans_pcie, cmd->id));
+               dump_stack();
                ret = -EIO;
                goto cancel;
        }
 
-       if (test_bit(STATUS_RFKILL, &trans_pcie->status)) {
+       if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
+           test_bit(STATUS_RFKILL, &trans_pcie->status)) {
                IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
                ret = -ERFKILL;
                goto cancel;
@@ -1564,7 +1585,8 @@ int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
        if (test_bit(STATUS_FW_ERROR, &trans_pcie->status))
                return -EIO;
 
-       if (test_bit(STATUS_RFKILL, &trans_pcie->status)) {
+       if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
+           test_bit(STATUS_RFKILL, &trans_pcie->status)) {
                IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
                                  cmd->id);
                return -ERFKILL;
@@ -1592,7 +1614,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
        u8 wait_write_ptr = 0;
        __le16 fc = hdr->frame_control;
        u8 hdr_len = ieee80211_hdrlen(fc);
-       u16 __maybe_unused wifi_seq;
+       u16 wifi_seq;
 
        txq = &trans_pcie->txq[txq_id];
        q = &txq->q;
@@ -1609,13 +1631,11 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
         * the BA.
         * Check here that the packets are in the right place on the ring.
         */
-#ifdef CONFIG_IWLWIFI_DEBUG
        wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
-       WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) &&
-                 ((wifi_seq & 0xff) != q->write_ptr),
+       WARN_ONCE(trans_pcie->txq[txq_id].ampdu &&
+                 (wifi_seq & 0xff) != q->write_ptr,
                  "Q: %d WiFi Seq %d tfdNum %d",
                  txq_id, wifi_seq, q->write_ptr);
-#endif
 
        /* Set up driver data for this TFD */
        txq->entries[q->write_ptr].skb = skb;
diff --git a/drivers/net/wireless/mwifiex/11h.c b/drivers/net/wireless/mwifiex/11h.c
new file mode 100644 (file)
index 0000000..8d68307
--- /dev/null
@@ -0,0 +1,101 @@
+/*
+ * Marvell Wireless LAN device driver: 802.11h
+ *
+ * Copyright (C) 2013, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License").  You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available by writing to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
+ * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED.  The License provides additional details about
+ * this warranty disclaimer.
+ */
+
+#include "main.h"
+#include "fw.h"
+
+
+/* This function appends 11h info to a buffer while joining an
+ * infrastructure BSS
+ */
+static void
+mwifiex_11h_process_infra_join(struct mwifiex_private *priv, u8 **buffer,
+                              struct mwifiex_bssdescriptor *bss_desc)
+{
+       struct mwifiex_ie_types_header *ie_header;
+       struct mwifiex_ie_types_pwr_capability *cap;
+       struct mwifiex_ie_types_local_pwr_constraint *constraint;
+       struct ieee80211_supported_band *sband;
+       u8 radio_type;
+       int i;
+
+       if (!buffer || !(*buffer))
+               return;
+
+       radio_type = mwifiex_band_to_radio_type((u8) bss_desc->bss_band);
+       sband = priv->wdev->wiphy->bands[radio_type];
+
+       cap = (struct mwifiex_ie_types_pwr_capability *)*buffer;
+       cap->header.type = cpu_to_le16(WLAN_EID_PWR_CAPABILITY);
+       cap->header.len = cpu_to_le16(2);
+       cap->min_pwr = 0;
+       cap->max_pwr = 0;
+       *buffer += sizeof(*cap);
+
+       constraint = (struct mwifiex_ie_types_local_pwr_constraint *)*buffer;
+       constraint->header.type = cpu_to_le16(WLAN_EID_PWR_CONSTRAINT);
+       constraint->header.len = cpu_to_le16(2);
+       constraint->chan = bss_desc->channel;
+       constraint->constraint = bss_desc->local_constraint;
+       *buffer += sizeof(*constraint);
+
+       ie_header = (struct mwifiex_ie_types_header *)*buffer;
+       ie_header->type = cpu_to_le16(TLV_TYPE_PASSTHROUGH);
+       ie_header->len  = cpu_to_le16(2 * sband->n_channels + 2);
+       *buffer += sizeof(*ie_header);
+       *(*buffer)++ = WLAN_EID_SUPPORTED_CHANNELS;
+       *(*buffer)++ = 2 * sband->n_channels;
+       for (i = 0; i < sband->n_channels; i++) {
+               *(*buffer)++ = ieee80211_frequency_to_channel(
+                                       sband->channels[i].center_freq);
+               *(*buffer)++ = 1; /* one channel in the subband */
+       }
+}
+
+/* Enable or disable the 11h extensions in the firmware */
+static int mwifiex_11h_activate(struct mwifiex_private *priv, bool flag)
+{
+       u32 enable = flag;
+
+       return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
+                                    HostCmd_ACT_GEN_SET, DOT11H_I, &enable);
+}
+
+/* This function processes the TLV buffer for a pending BSS Join command.
+ *
+ * It activates 11h functionality in the firmware if the spectrum management
+ * capability bit is found in the network we are joining, and sets the
+ * necessary TLVs based on the requested network's 11h capability.
+ */
+void mwifiex_11h_process_join(struct mwifiex_private *priv, u8 **buffer,
+                             struct mwifiex_bssdescriptor *bss_desc)
+{
+       if (bss_desc->sensed_11h) {
+               /* Activate 11h functions in firmware, turns on capability
+                * bit
+                */
+               mwifiex_11h_activate(priv, true);
+               bss_desc->cap_info_bitmap |= WLAN_CAPABILITY_SPECTRUM_MGMT;
+               mwifiex_11h_process_infra_join(priv, buffer, bss_desc);
+       } else {
+               /* Deactivate 11h functions in the firmware */
+               mwifiex_11h_activate(priv, false);
+               bss_desc->cap_info_bitmap &= ~WLAN_CAPABILITY_SPECTRUM_MGMT;
+       }
+}
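
For orientation, mwifiex_11h_process_infra_join() above appends three items to the association buffer: a power-capability TLV, a power-constraint TLV, and a pass-through TLV wrapping a Supported Channels IE with one (channel, range=1) pair per channel of the band. The sketch below builds a buffer of the same shape with simplified structs; the element IDs (32, 33, 36) and channel numbers are hard-coded for the example, and the outer pass-through TLV header is omitted (illustration only):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct tlv_header {
	uint16_t type;
	uint16_t len;
} __attribute__((packed));

struct pwr_capability {
	struct tlv_header header;
	int8_t min_pwr;
	int8_t max_pwr;
} __attribute__((packed));

struct pwr_constraint {
	struct tlv_header header;
	uint8_t chan;
	uint8_t constraint;
} __attribute__((packed));

int main(void)
{
	uint8_t buf[64], *p = buf;
	uint8_t channels[] = { 36, 40 };                   /* example channels */
	struct pwr_capability cap = { { 33, 2 }, 0, 0 };   /* EID 33 assumed */
	struct pwr_constraint con = { { 32, 2 }, 36, 3 };  /* EID 32 assumed */
	size_t i;

	memcpy(p, &cap, sizeof(cap));
	p += sizeof(cap);
	memcpy(p, &con, sizeof(con));
	p += sizeof(con);

	/* Supported Channels IE: EID 36, then (first channel, count) pairs */
	*p++ = 36;
	*p++ = 2 * sizeof(channels);
	for (i = 0; i < sizeof(channels); i++) {
		*p++ = channels[i];
		*p++ = 1;	/* one channel in the subband */
	}

	printf("appended %zu bytes\n", (size_t)(p - buf));
	return 0;
}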
index 4f614aad9dedd2b2bcf8fd1e61015b8ce3f024db..f7ff4725506a5f58bd171432af541791e8835da3 100644 (file)
@@ -3,13 +3,13 @@ config MWIFIEX
        depends on CFG80211
        ---help---
          This adds support for wireless adapters based on Marvell
-         802.11n chipsets.
+         802.11n/ac chipsets.
 
          If you choose to build it as a module, it will be called
          mwifiex.
 
 config MWIFIEX_SDIO
-       tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797"
+       tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8897"
        depends on MWIFIEX && MMC
        select FW_LOADER
        ---help---
index ecf28464367fc5be95a8d231b0753af349b94ec9..a42a506fd32b6b0a211607c3ae0e224f6e6c8106 100644 (file)
@@ -40,6 +40,7 @@ mwifiex-y += sta_rx.o
 mwifiex-y += uap_txrx.o
 mwifiex-y += cfg80211.o
 mwifiex-y += ethtool.o
+mwifiex-y += 11h.o
 mwifiex-$(CONFIG_DEBUG_FS) += debugfs.o
 obj-$(CONFIG_MWIFIEX) += mwifiex.o
 
index e42b266a023a2c0df18e7cd85fd7dbbb3cf07d45..ef5fa890a28628666398f39624f83e1bfe6aa1f5 100644 (file)
@@ -20,6 +20,9 @@
 #include "cfg80211.h"
 #include "main.h"
 
+static char *reg_alpha2;
+module_param(reg_alpha2, charp, 0);
+
 static const struct ieee80211_iface_limit mwifiex_ap_sta_limits[] = {
        {
                .max = 2, .types = BIT(NL80211_IFTYPE_STATION),
@@ -1231,6 +1234,51 @@ static int mwifiex_cfg80211_change_beacon(struct wiphy *wiphy,
        return 0;
 }
 
+/* cfg80211 operation handler for del_station.
+ * This function deauthenticates the station whose address is given in the
+ * mac parameter. If mac is NULL or the broadcast address, every station in
+ * the associated-stations list is deauthenticated. If the BSS is not started
+ * or the station list is empty, no action is taken.
+ */
+static int
+mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev,
+                            u8 *mac)
+{
+       struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+       struct mwifiex_sta_node *sta_node;
+       unsigned long flags;
+
+       if (list_empty(&priv->sta_list) || !priv->bss_started)
+               return 0;
+
+       if (!mac || is_broadcast_ether_addr(mac)) {
+               wiphy_dbg(wiphy, "%s: NULL/broadcast mac address\n", __func__);
+               list_for_each_entry(sta_node, &priv->sta_list, list) {
+                       if (mwifiex_send_cmd_sync(priv,
+                                                 HostCmd_CMD_UAP_STA_DEAUTH,
+                                                 HostCmd_ACT_GEN_SET, 0,
+                                                 sta_node->mac_addr))
+                               return -1;
+                       mwifiex_uap_del_sta_data(priv, sta_node);
+               }
+       } else {
+               wiphy_dbg(wiphy, "%s: mac address %pM\n", __func__, mac);
+               spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+               sta_node = mwifiex_get_sta_entry(priv, mac);
+               spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+               if (sta_node) {
+                       if (mwifiex_send_cmd_sync(priv,
+                                                 HostCmd_CMD_UAP_STA_DEAUTH,
+                                                 HostCmd_ACT_GEN_SET, 0,
+                                                 sta_node->mac_addr))
+                               return -1;
+                       mwifiex_uap_del_sta_data(priv, sta_node);
+               }
+       }
+
+       return 0;
+}
+
 static int
 mwifiex_cfg80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant)
 {
@@ -1859,6 +1907,7 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
        int i, offset, ret;
        struct ieee80211_channel *chan;
        struct ieee_types_header *ie;
+       struct mwifiex_user_scan_cfg *user_scan_cfg;
 
        wiphy_dbg(wiphy, "info: received scan request on %s\n", dev->name);
 
@@ -1869,20 +1918,22 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
                return -EBUSY;
        }
 
-       if (priv->user_scan_cfg) {
+       /* Block the scan request if a scan is already running, or if the
+        * scan cleanup that runs when an interface is disabled is still in
+        * progress
+        */
+       if (priv->scan_request || priv->scan_aborting) {
                dev_err(priv->adapter->dev, "cmd: Scan already in process..\n");
                return -EBUSY;
        }
 
-       priv->user_scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg),
-                                     GFP_KERNEL);
-       if (!priv->user_scan_cfg)
+       user_scan_cfg = kzalloc(sizeof(*user_scan_cfg), GFP_KERNEL);
+       if (!user_scan_cfg)
                return -ENOMEM;
 
        priv->scan_request = request;
 
-       priv->user_scan_cfg->num_ssids = request->n_ssids;
-       priv->user_scan_cfg->ssid_list = request->ssids;
+       user_scan_cfg->num_ssids = request->n_ssids;
+       user_scan_cfg->ssid_list = request->ssids;
 
        if (request->ie && request->ie_len) {
                offset = 0;
@@ -1902,25 +1953,25 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
        for (i = 0; i < min_t(u32, request->n_channels,
                              MWIFIEX_USER_SCAN_CHAN_MAX); i++) {
                chan = request->channels[i];
-               priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value;
-               priv->user_scan_cfg->chan_list[i].radio_type = chan->band;
+               user_scan_cfg->chan_list[i].chan_number = chan->hw_value;
+               user_scan_cfg->chan_list[i].radio_type = chan->band;
 
                if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)
-                       priv->user_scan_cfg->chan_list[i].scan_type =
+                       user_scan_cfg->chan_list[i].scan_type =
                                                MWIFIEX_SCAN_TYPE_PASSIVE;
                else
-                       priv->user_scan_cfg->chan_list[i].scan_type =
+                       user_scan_cfg->chan_list[i].scan_type =
                                                MWIFIEX_SCAN_TYPE_ACTIVE;
 
-               priv->user_scan_cfg->chan_list[i].scan_time = 0;
+               user_scan_cfg->chan_list[i].scan_time = 0;
        }
 
-       ret = mwifiex_scan_networks(priv, priv->user_scan_cfg);
+       ret = mwifiex_scan_networks(priv, user_scan_cfg);
+       kfree(user_scan_cfg);
        if (ret) {
                dev_err(priv->adapter->dev, "scan failed: %d\n", ret);
+               priv->scan_aborting = false;
                priv->scan_request = NULL;
-               kfree(priv->user_scan_cfg);
-               priv->user_scan_cfg = NULL;
                return ret;
        }
 
@@ -2419,6 +2470,7 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
        .change_beacon = mwifiex_cfg80211_change_beacon,
        .set_cqm_rssi_config = mwifiex_cfg80211_set_cqm_rssi_config,
        .set_antenna = mwifiex_cfg80211_set_antenna,
+       .del_station = mwifiex_cfg80211_del_station,
 #ifdef CONFIG_PM
        .suspend = mwifiex_cfg80211_suspend,
        .resume = mwifiex_cfg80211_resume,
@@ -2426,6 +2478,27 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
 #endif
 };
 
+#ifdef CONFIG_PM
+static const struct wiphy_wowlan_support mwifiex_wowlan_support = {
+       .flags = WIPHY_WOWLAN_MAGIC_PKT,
+       .n_patterns = MWIFIEX_MAX_FILTERS,
+       .pattern_min_len = 1,
+       .pattern_max_len = MWIFIEX_MAX_PATTERN_LEN,
+       .max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN,
+};
+#endif
+
+static bool mwifiex_is_valid_alpha2(const char *alpha2)
+{
+       if (!alpha2 || strlen(alpha2) != 2)
+               return false;
+
+       if (isalpha(alpha2[0]) && isalpha(alpha2[1]))
+               return true;
+
+       return false;
+}
+
 /*
  * This function registers the device with CFG802.11 subsystem.
  *
@@ -2478,16 +2551,13 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
                        WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
                        WIPHY_FLAG_AP_UAPSD |
                        WIPHY_FLAG_CUSTOM_REGULATORY |
+                       WIPHY_FLAG_STRICT_REGULATORY |
                        WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
 
        wiphy_apply_custom_regulatory(wiphy, &mwifiex_world_regdom_custom);
 
 #ifdef CONFIG_PM
-       wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT;
-       wiphy->wowlan.n_patterns = MWIFIEX_MAX_FILTERS;
-       wiphy->wowlan.pattern_min_len = 1;
-       wiphy->wowlan.pattern_max_len = MWIFIEX_MAX_PATTERN_LEN;
-       wiphy->wowlan.max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN;
+       wiphy->wowlan = &mwifiex_wowlan_support;
 #endif
 
        wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
@@ -2519,10 +2589,16 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
                wiphy_free(wiphy);
                return ret;
        }
-       country_code = mwifiex_11d_code_2_region(priv->adapter->region_code);
-       if (country_code)
-               dev_info(adapter->dev,
-                        "ignoring F/W country code %2.2s\n", country_code);
+
+       if (reg_alpha2 && mwifiex_is_valid_alpha2(reg_alpha2)) {
+               wiphy_info(wiphy, "driver hint alpha2: %2.2s\n", reg_alpha2);
+               regulatory_hint(wiphy, reg_alpha2);
+       } else {
+               country_code = mwifiex_11d_code_2_region(adapter->region_code);
+               if (country_code)
+                       wiphy_info(wiphy, "ignoring F/W country code %2.2s\n",
+                                  country_code);
+       }
 
        adapter->wiphy = wiphy;
        return ret;
index 26755d9acb556d20e735aa334032e66fb4c0be52..2d761477d15e5761ed968200484a6fd55d066f39 100644 (file)
@@ -570,6 +570,7 @@ int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
                case HostCmd_CMD_UAP_SYS_CONFIG:
                case HostCmd_CMD_UAP_BSS_START:
                case HostCmd_CMD_UAP_BSS_STOP:
+               case HostCmd_CMD_UAP_STA_DEAUTH:
                        ret = mwifiex_uap_prepare_cmd(priv, cmd_no, cmd_action,
                                                      cmd_oid, data_buf,
                                                      cmd_ptr);
index 1f7578d553ec982e94b8c329310d08459cf18225..1b45aa5333008c9475595f91123c1bcb2f81e018 100644 (file)
@@ -245,6 +245,8 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define HT_BW_20    0
 #define HT_BW_40    1
 
+#define DFS_CHAN_MOVE_TIME      10000
+
 #define HostCmd_CMD_GET_HW_SPEC                       0x0003
 #define HostCmd_CMD_802_11_SCAN                       0x0006
 #define HostCmd_CMD_802_11_GET_LOG                    0x000b
@@ -271,6 +273,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define HostCmd_CMD_802_11_SUBSCRIBE_EVENT            0x0075
 #define HostCmd_CMD_802_11_TX_RATE_QUERY              0x007f
 #define HostCmd_CMD_802_11_IBSS_COALESCING_STATUS     0x0083
+#define HostCmd_CMD_CFG_DATA                          0x008f
 #define HostCmd_CMD_VERSION_EXT                       0x0097
 #define HostCmd_CMD_MEF_CFG                           0x009a
 #define HostCmd_CMD_RSSI_INFO                         0x00a4
@@ -279,6 +282,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define HostCmd_CMD_UAP_SYS_CONFIG                    0x00b0
 #define HostCmd_CMD_UAP_BSS_START                     0x00b1
 #define HostCmd_CMD_UAP_BSS_STOP                      0x00b2
+#define HostCmd_CMD_UAP_STA_DEAUTH                    0x00b5
 #define HostCmd_CMD_11N_CFG                           0x00cd
 #define HostCmd_CMD_11N_ADDBA_REQ                     0x00ce
 #define HostCmd_CMD_11N_ADDBA_RSP                     0x00cf
@@ -436,6 +440,7 @@ enum P2P_MODES {
 #define EVENT_BW_CHANGE                 0x00000048
 #define EVENT_UAP_MIC_COUNTERMEASURES   0x0000004c
 #define EVENT_HOSTWAKE_STAIE           0x0000004d
+#define EVENT_CHANNEL_SWITCH_ANN        0x00000050
 #define EVENT_REMAIN_ON_CHAN_EXPIRED    0x0000005f
 
 #define EVENT_ID_MASK                   0xffff
@@ -464,6 +469,8 @@ enum P2P_MODES {
 #define MWIFIEX_CRITERIA_UNICAST       BIT(1)
 #define MWIFIEX_CRITERIA_MULTICAST     BIT(3)
 
+#define CFG_DATA_TYPE_CAL              2
+
 struct mwifiex_ie_types_header {
        __le16 type;
        __le16 len;
@@ -971,6 +978,7 @@ enum SNMP_MIB_INDEX {
        LONG_RETRY_LIM_I = 7,
        FRAG_THRESH_I = 8,
        DOT11D_I = 9,
+       DOT11H_I = 10,
 };
 
 #define MAX_SNMP_BUF_SIZE   128
@@ -1197,6 +1205,23 @@ struct host_cmd_ds_amsdu_aggr_ctrl {
        __le16 curr_buf_size;
 } __packed;
 
+struct host_cmd_ds_sta_deauth {
+       u8 mac[ETH_ALEN];
+       __le16 reason;
+} __packed;
+
+struct mwifiex_ie_types_pwr_capability {
+       struct mwifiex_ie_types_header header;
+       s8 min_pwr;
+       s8 max_pwr;
+};
+
+struct mwifiex_ie_types_local_pwr_constraint {
+       struct mwifiex_ie_types_header header;
+       u8 chan;
+       u8 constraint;
+};
+
 struct mwifiex_ie_types_wmm_param_set {
        struct mwifiex_ie_types_header header;
        u8 wmm_ie[1];
@@ -1573,6 +1598,12 @@ struct mwifiex_ie_list {
        struct mwifiex_ie ie_list[MAX_MGMT_IE_INDEX];
 } __packed;
 
+struct host_cmd_ds_802_11_cfg_data {
+       __le16 action;
+       __le16 type;
+       __le16 data_len;
+} __packed;
+
 struct host_cmd_ds_command {
        __le16 command;
        __le16 size;
@@ -1630,7 +1661,9 @@ struct host_cmd_ds_command {
                struct host_cmd_ds_802_11_eeprom_access eeprom;
                struct host_cmd_ds_802_11_subsc_evt subsc_evt;
                struct host_cmd_ds_sys_config uap_sys_config;
+               struct host_cmd_ds_sta_deauth sta_deauth;
                struct host_cmd_11ac_vht_cfg vht_cfg;
+               struct host_cmd_ds_802_11_cfg_data cfg_data;
        } params;
 } __packed;
 
index 9f44fda19db96b3af939fdebd408b95139e45c47..caaf4bd56b3062ea4c9a11028233c1517f69296c 100644 (file)
@@ -52,87 +52,6 @@ static int mwifiex_add_bss_prio_tbl(struct mwifiex_private *priv)
        return 0;
 }
 
-static void scan_delay_timer_fn(unsigned long data)
-{
-       struct mwifiex_private *priv = (struct mwifiex_private *)data;
-       struct mwifiex_adapter *adapter = priv->adapter;
-       struct cmd_ctrl_node *cmd_node, *tmp_node;
-       unsigned long flags;
-
-       if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT) {
-               /*
-                * Abort scan operation by cancelling all pending scan
-                * commands
-                */
-               spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
-               list_for_each_entry_safe(cmd_node, tmp_node,
-                                        &adapter->scan_pending_q, list) {
-                       list_del(&cmd_node->list);
-                       mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
-               }
-               spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
-
-               spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
-               adapter->scan_processing = false;
-               adapter->scan_delay_cnt = 0;
-               adapter->empty_tx_q_cnt = 0;
-               spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
-
-               if (priv->user_scan_cfg) {
-                       if (priv->scan_request) {
-                               dev_dbg(priv->adapter->dev,
-                                       "info: aborting scan\n");
-                               cfg80211_scan_done(priv->scan_request, 1);
-                               priv->scan_request = NULL;
-                       } else {
-                               dev_dbg(priv->adapter->dev,
-                                       "info: scan already aborted\n");
-                       }
-
-                       kfree(priv->user_scan_cfg);
-                       priv->user_scan_cfg = NULL;
-               }
-               goto done;
-       }
-
-       if (!atomic_read(&priv->adapter->is_tx_received)) {
-               adapter->empty_tx_q_cnt++;
-               if (adapter->empty_tx_q_cnt == MWIFIEX_MAX_EMPTY_TX_Q_CNT) {
-                       /*
-                        * No Tx traffic for 200msec. Get scan command from
-                        * scan pending queue and put to cmd pending queue to
-                        * resume scan operation
-                        */
-                       adapter->scan_delay_cnt = 0;
-                       adapter->empty_tx_q_cnt = 0;
-                       spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
-                       cmd_node = list_first_entry(&adapter->scan_pending_q,
-                                                   struct cmd_ctrl_node, list);
-                       list_del(&cmd_node->list);
-                       spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
-                                              flags);
-
-                       mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
-                                                       true);
-                       queue_work(adapter->workqueue, &adapter->main_work);
-                       goto done;
-               }
-       } else {
-               adapter->empty_tx_q_cnt = 0;
-       }
-
-       /* Delay scan operation further by 20msec */
-       mod_timer(&priv->scan_delay_timer, jiffies +
-                 msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
-       adapter->scan_delay_cnt++;
-
-done:
-       if (atomic_read(&priv->adapter->is_tx_received))
-               atomic_set(&priv->adapter->is_tx_received, false);
-
-       return;
-}
-
 /*
  * This function initializes the private structure and sets default
  * values to the members.
@@ -214,8 +133,8 @@ int mwifiex_init_priv(struct mwifiex_private *priv)
 
        priv->scan_block = false;
 
-       setup_timer(&priv->scan_delay_timer, scan_delay_timer_fn,
-                   (unsigned long)priv);
+       priv->csa_chan = 0;
+       priv->csa_expire_time = 0;
 
        return mwifiex_add_bss_prio_tbl(priv);
 }
@@ -447,23 +366,29 @@ static void mwifiex_free_lock_list(struct mwifiex_adapter *adapter)
 }
 
 /*
- * This function frees the adapter structure.
+ * This function performs cleanup for adapter structure.
  *
- * The freeing operation is done recursively, by canceling all
- * pending commands, freeing the member buffers previously
- * allocated (command buffers, scan table buffer, sleep confirm
- * command buffer), stopping the timers and calling the cleanup
- * routines for every interface, before the actual adapter
- * structure is freed.
+ * The cleanup is done recursively, by canceling all pending
+ * commands, freeing the member buffers previously allocated
+ * (command buffers, scan table buffer, sleep confirm command
+ * buffer), stopping the timers and calling the cleanup routines
+ * for every interface.
  */
 static void
-mwifiex_free_adapter(struct mwifiex_adapter *adapter)
+mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
 {
+       int i;
+
        if (!adapter) {
                pr_err("%s: adapter is NULL\n", __func__);
                return;
        }
 
+       for (i = 0; i < adapter->priv_num; i++) {
+               if (adapter->priv[i])
+                       del_timer_sync(&adapter->priv[i]->scan_delay_timer);
+       }
+
        mwifiex_cancel_all_pending_cmd(adapter);
 
        /* Free lock variables */
@@ -684,7 +609,6 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
        int ret = -EINPROGRESS;
        struct mwifiex_private *priv;
        s32 i;
-       unsigned long flags;
        struct sk_buff *skb;
 
        /* mwifiex already shutdown */
@@ -719,7 +643,7 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
                }
        }
 
-       spin_lock_irqsave(&adapter->mwifiex_lock, flags);
+       spin_lock(&adapter->mwifiex_lock);
 
        if (adapter->if_ops.data_complete) {
                while ((skb = skb_dequeue(&adapter->usb_rx_data_q))) {
@@ -733,10 +657,9 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
                }
        }
 
-       /* Free adapter structure */
-       mwifiex_free_adapter(adapter);
+       mwifiex_adapter_cleanup(adapter);
 
-       spin_unlock_irqrestore(&adapter->mwifiex_lock, flags);
+       spin_unlock(&adapter->mwifiex_lock);
 
        /* Notify completion */
        ret = mwifiex_shutdown_fw_complete(adapter);
index 6bcb66e6e97c8c1a43a139ba6970ddc0b15f701e..1c8a771e8e8127e666265d10447b88f884a4c5fa 100644 (file)
@@ -534,6 +534,8 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
 
        mwifiex_cmd_append_tsf_tlv(priv, &pos, bss_desc);
 
+       mwifiex_11h_process_join(priv, &pos, bss_desc);
+
        cmd->size = cpu_to_le16((u16) (pos - (u8 *) assoc) + S_DS_GEN);
 
        /* Set the Capability info at last */
@@ -919,9 +921,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
        memcpy(&priv->curr_bss_params.data_rates,
               &adhoc_start->data_rate, priv->curr_bss_params.num_of_rates);
 
-       dev_dbg(adapter->dev, "info: ADHOC_S_CMD: rates=%02x %02x %02x %02x\n",
-               adhoc_start->data_rate[0], adhoc_start->data_rate[1],
-               adhoc_start->data_rate[2], adhoc_start->data_rate[3]);
+       dev_dbg(adapter->dev, "info: ADHOC_S_CMD: rates=%4ph\n",
+               adhoc_start->data_rate);
 
        dev_dbg(adapter->dev, "info: ADHOC_S_CMD: AD-HOC Start command is ready\n");
 
index 2eb88ea9acf7f66f51e8e7544f52642586fe627e..e15ab72fb03ddc1ad6eab8e54aded37de3143253 100644 (file)
 #define VERSION        "1.0"
 
 const char driver_version[] = "mwifiex " VERSION " (%s) ";
+static char *cal_data_cfg;
+module_param(cal_data_cfg, charp, 0);
+
+static void scan_delay_timer_fn(unsigned long data)
+{
+       struct mwifiex_private *priv = (struct mwifiex_private *)data;
+       struct mwifiex_adapter *adapter = priv->adapter;
+       struct cmd_ctrl_node *cmd_node, *tmp_node;
+       unsigned long flags;
+
+       if (adapter->surprise_removed)
+               return;
+
+       if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT) {
+               /*
+                * Abort scan operation by cancelling all pending scan
+                * commands
+                */
+               spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+               list_for_each_entry_safe(cmd_node, tmp_node,
+                                        &adapter->scan_pending_q, list) {
+                       list_del(&cmd_node->list);
+                       mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
+               }
+               spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+
+               spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
+               adapter->scan_processing = false;
+               adapter->scan_delay_cnt = 0;
+               adapter->empty_tx_q_cnt = 0;
+               spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+
+               if (priv->scan_request) {
+                       dev_dbg(adapter->dev, "info: aborting scan\n");
+                       cfg80211_scan_done(priv->scan_request, 1);
+                       priv->scan_request = NULL;
+               } else {
+                       priv->scan_aborting = false;
+                       dev_dbg(adapter->dev, "info: scan already aborted\n");
+               }
+               goto done;
+       }
+
+       if (!atomic_read(&priv->adapter->is_tx_received)) {
+               adapter->empty_tx_q_cnt++;
+               if (adapter->empty_tx_q_cnt == MWIFIEX_MAX_EMPTY_TX_Q_CNT) {
+                       /*
+                        * No Tx traffic for 200msec. Get scan command from
+                        * scan pending queue and put to cmd pending queue to
+                        * resume scan operation
+                        */
+                       adapter->scan_delay_cnt = 0;
+                       adapter->empty_tx_q_cnt = 0;
+                       spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+                       cmd_node = list_first_entry(&adapter->scan_pending_q,
+                                                   struct cmd_ctrl_node, list);
+                       list_del(&cmd_node->list);
+                       spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
+                                              flags);
+
+                       mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
+                                                       true);
+                       queue_work(adapter->workqueue, &adapter->main_work);
+                       goto done;
+               }
+       } else {
+               adapter->empty_tx_q_cnt = 0;
+       }
+
+       /* Delay scan operation further by 20msec */
+       mod_timer(&priv->scan_delay_timer, jiffies +
+                 msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
+       adapter->scan_delay_cnt++;
+
+done:
+       if (atomic_read(&priv->adapter->is_tx_received))
+               atomic_set(&priv->adapter->is_tx_received, false);
+
+       return;
+}
 
 /*
  * This function registers the device and performs all the necessary
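The re-added scan_delay_timer_fn above (now armed per interface from mwifiex_register(), see the next hunk) paces a host scan around live traffic: each 20 ms tick either counts another quiet interval, resumes the queued scan command once enough quiet ticks have passed (the 200 ms the comment mentions), or gives up after the maximum number of delays. A standalone sketch of just that counting decision; the two limits are assumed values, not the driver's definitions in main.h:

#include <stdbool.h>
#include <stdio.h>

/* Assumed limits, mirroring MWIFIEX_MAX_SCAN_DELAY_CNT and
 * MWIFIEX_MAX_EMPTY_TX_Q_CNT only in spirit.
 */
#define MAX_SCAN_DELAY_CNT	50
#define MAX_EMPTY_TX_Q_CNT	10	/* 10 ticks * 20 ms = 200 ms of Tx silence */

enum scan_action { SCAN_DELAY_AGAIN, SCAN_RESUME, SCAN_ABORT };

/* One 20 ms timer tick: keep delaying, resume the queued scan, or abort. */
static enum scan_action scan_tick(bool tx_seen, unsigned *delay_cnt,
				  unsigned *empty_cnt)
{
	if (*delay_cnt == MAX_SCAN_DELAY_CNT)
		return SCAN_ABORT;

	if (!tx_seen) {
		if (++(*empty_cnt) == MAX_EMPTY_TX_Q_CNT) {
			*delay_cnt = 0;
			*empty_cnt = 0;
			return SCAN_RESUME;
		}
	} else {
		*empty_cnt = 0;
	}

	(*delay_cnt)++;
	return SCAN_DELAY_AGAIN;
}

int main(void)
{
	unsigned delay = 0, empty = 0;

	/* Simulate twelve quiet ticks: the scan resumes on the tenth. */
	for (int i = 1; i <= 12; i++)
		if (scan_tick(false, &delay, &empty) == SCAN_RESUME)
			printf("resume scan after tick %d\n", i);
	return 0;
}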
@@ -73,6 +153,10 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
 
                adapter->priv[i]->adapter = adapter;
                adapter->priv_num++;
+
+               setup_timer(&adapter->priv[i]->scan_delay_timer,
+                           scan_delay_timer_fn,
+                           (unsigned long)adapter->priv[i]);
        }
        mwifiex_init_lock_list(adapter);
 
@@ -336,6 +420,13 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
 
        dev_notice(adapter->dev, "WLAN FW is active\n");
 
+       if (cal_data_cfg) {
+               if ((request_firmware(&adapter->cal_data, cal_data_cfg,
+                                     adapter->dev)) < 0)
+                       dev_err(adapter->dev,
+                               "Cal data request_firmware() failed\n");
+       }
+
        adapter->init_wait_q_woken = false;
        ret = mwifiex_init_fw(adapter);
        if (ret == -1) {
@@ -390,6 +481,10 @@ err_init_fw:
        pr_debug("info: %s: unregister device\n", __func__);
        adapter->if_ops.unregister_dev(adapter);
 done:
+       if (adapter->cal_data) {
+               release_firmware(adapter->cal_data);
+               adapter->cal_data = NULL;
+       }
        release_firmware(adapter->firmware);
        complete(&adapter->fw_load);
        return;
@@ -436,6 +531,7 @@ mwifiex_close(struct net_device *dev)
                dev_dbg(priv->adapter->dev, "aborting scan on ndo_stop\n");
                cfg80211_scan_done(priv->scan_request, 1);
                priv->scan_request = NULL;
+               priv->scan_aborting = true;
        }
 
        return 0;
@@ -573,9 +669,8 @@ static void mwifiex_set_multicast_list(struct net_device *dev)
                mcast_list.mode = MWIFIEX_ALL_MULTI_MODE;
        } else {
                mcast_list.mode = MWIFIEX_MULTICAST_MODE;
-               if (netdev_mc_count(dev))
-                       mcast_list.num_multicast_addr =
-                               mwifiex_copy_mcast_addr(&mcast_list, dev);
+               mcast_list.num_multicast_addr =
+                       mwifiex_copy_mcast_addr(&mcast_list, dev);
        }
        mwifiex_request_set_multicast_list(priv, &mcast_list);
 }
index 4ef67fca06d3da5639acc7d8edf7e24aa52f0c2c..3da73d36acdf5e9a5ebe30ce75333704d98a00c0 100644 (file)
@@ -309,6 +309,9 @@ struct mwifiex_bssdescriptor {
        u16 wapi_offset;
        u8 *beacon_buf;
        u32 beacon_buf_size;
+       u8 sensed_11h;
+       u8 local_constraint;
+       u8 chan_sw_ie_present;
 };
 
 struct mwifiex_current_bss_params {
@@ -492,7 +495,6 @@ struct mwifiex_private {
        struct semaphore async_sem;
        u8 report_scan_result;
        struct cfg80211_scan_request *scan_request;
-       struct mwifiex_user_scan_cfg *user_scan_cfg;
        u8 cfg_bssid[6];
        struct wps wps;
        u8 scan_block;
@@ -510,6 +512,9 @@ struct mwifiex_private {
        u8 ap_11ac_enabled;
        u32 mgmt_frame_mask;
        struct mwifiex_roc_cfg roc_cfg;
+       bool scan_aborting;
+       u8 csa_chan;
+       unsigned long csa_expire_time;
 };
 
 enum mwifiex_ba_status {
@@ -730,6 +735,7 @@ struct mwifiex_adapter {
        u16 max_mgmt_ie_index;
        u8 scan_delay_cnt;
        u8 empty_tx_q_cnt;
+       const struct firmware *cal_data;
 
        /* 11AC */
        u32 is_hw_11ac_capable;
@@ -1017,6 +1023,24 @@ static inline bool mwifiex_is_skb_mgmt_frame(struct sk_buff *skb)
        return (*(u32 *)skb->data == PKT_TYPE_MGMT);
 }
 
+/* This function retrieves channel closed for operation by Channel
+ * Switch Announcement.
+ */
+static inline u8
+mwifiex_11h_get_csa_closed_channel(struct mwifiex_private *priv)
+{
+       if (!priv->csa_chan)
+               return 0;
+
+       /* Clear csa channel, if DFS channel move time has passed */
+       if (jiffies > priv->csa_expire_time) {
+               priv->csa_chan = 0;
+               priv->csa_expire_time = 0;
+       }
+
+       return priv->csa_chan;
+}
+
 int mwifiex_init_shutdown_fw(struct mwifiex_private *priv,
                             u32 func_init_shutdown);
 int mwifiex_add_card(void *, struct semaphore *, struct mwifiex_if_ops *, u8);
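The inline helper added above treats priv->csa_chan as a temporarily blocked channel, cleared once the jiffies deadline in csa_expire_time has passed; the scan code further below calls it before building channel lists and then skips that channel. A standalone sketch of the same expire-then-skip behaviour, with a plain tick counter standing in for jiffies and example channel numbers:

#include <stdio.h>

struct csa_state {
	unsigned closed_chan;	/* 0 means no channel is blocked */
	unsigned long expire;	/* absolute deadline in ticks */
};

/* Return the blocked channel, clearing it once the deadline has passed. */
static unsigned csa_closed_channel(struct csa_state *s, unsigned long now)
{
	if (!s->closed_chan)
		return 0;

	if (now > s->expire) {
		s->closed_chan = 0;
		s->expire = 0;
	}
	return s->closed_chan;
}

int main(void)
{
	struct csa_state s = { .closed_chan = 11, .expire = 100 };

	for (unsigned long now = 90; now <= 110; now += 10) {
		unsigned blocked = csa_closed_channel(&s, now);

		for (unsigned chan = 10; chan <= 12; chan++) {
			if (chan == blocked)
				continue;	/* skip the CSA-closed channel */
			printf("t=%lu: scanning channel %u\n", now, chan);
		}
	}
	return 0;
}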
@@ -1115,6 +1139,12 @@ int mwifiex_set_mgmt_ies(struct mwifiex_private *priv,
                         struct cfg80211_beacon_data *data);
 int mwifiex_del_mgmt_ies(struct mwifiex_private *priv);
 u8 *mwifiex_11d_code_2_region(u8 code);
+void mwifiex_uap_del_sta_data(struct mwifiex_private *priv,
+                             struct mwifiex_sta_node *node);
+
+void mwifiex_11h_process_join(struct mwifiex_private *priv, u8 **buffer,
+                             struct mwifiex_bssdescriptor *bss_desc);
+int mwifiex_11h_handle_event_chanswann(struct mwifiex_private *priv);
 
 extern const struct ethtool_ops mwifiex_ethtool_ops;
 
index 9cf5d8f07df80b63d5385c416b5206025c3a776b..c447d9bd1aa93746f5ad68b7c9205e1e85e20810 100644 (file)
@@ -391,6 +391,12 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv,
                return 0;
        }
 
+       if (bss_desc->chan_sw_ie_present) {
+               dev_err(adapter->dev,
+                       "Don't connect to AP with WLAN_EID_CHANNEL_SWITCH\n");
+               return -1;
+       }
+
        if (mwifiex_is_bss_wapi(priv, bss_desc)) {
                dev_dbg(adapter->dev, "info: return success for WAPI AP\n");
                return 0;
@@ -569,6 +575,9 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
                return -1;
        }
 
+       /* Check csa channel expiry before preparing scan list */
+       mwifiex_11h_get_csa_closed_channel(priv);
+
        chan_tlv_out->header.type = cpu_to_le16(TLV_TYPE_CHANLIST);
 
        /* Set the temp channel struct pointer to the start of the desired
@@ -598,6 +607,11 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
                while (tlv_idx < max_chan_per_scan &&
                       tmp_chan_list->chan_number && !done_early) {
 
+                       if (tmp_chan_list->chan_number == priv->csa_chan) {
+                               tmp_chan_list++;
+                               continue;
+                       }
+
                        dev_dbg(priv->adapter->dev,
                                "info: Scan: Chan(%3d), Radio(%d),"
                                " Mode(%d, %d), Dur(%d)\n",
@@ -1169,6 +1183,19 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
                        bss_entry->erp_flags = *(current_ptr + 2);
                        break;
 
+               case WLAN_EID_PWR_CONSTRAINT:
+                       bss_entry->local_constraint = *(current_ptr + 2);
+                       bss_entry->sensed_11h = true;
+                       break;
+
+               case WLAN_EID_CHANNEL_SWITCH:
+                       bss_entry->chan_sw_ie_present = true;
+               case WLAN_EID_PWR_CAPABILITY:
+               case WLAN_EID_TPC_REPORT:
+               case WLAN_EID_QUIET:
+                       bss_entry->sensed_11h = true;
+                       break;
+
                case WLAN_EID_EXT_SUPP_RATES:
                        /*
                         * Only process extended supported rate
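Note that the WLAN_EID_CHANNEL_SWITCH case above falls through on purpose: it records chan_sw_ie_present and then shares the sensed_11h assignment with the power-capability, TPC-report and quiet elements. A minimal standalone illustration of that pattern, reduced to three element IDs:

#include <stdbool.h>
#include <stdio.h>

enum { EID_CHANNEL_SWITCH = 37, EID_PWR_CAPABILITY = 33, EID_QUIET = 40 };

struct bss_flags {
	bool chan_sw_ie_present;
	bool sensed_11h;
};

static void parse_eid(struct bss_flags *f, int eid)
{
	switch (eid) {
	case EID_CHANNEL_SWITCH:
		f->chan_sw_ie_present = true;
		/* fall through: a CSA element also marks the BSS as 11h-aware */
	case EID_PWR_CAPABILITY:
	case EID_QUIET:
		f->sensed_11h = true;
		break;
	}
}

int main(void)
{
	struct bss_flags f = { false, false };

	parse_eid(&f, EID_CHANNEL_SWITCH);
	printf("chan_sw_ie_present=%d sensed_11h=%d\n",
	       f.chan_sw_ie_present, f.sensed_11h);
	return 0;
}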
@@ -1575,6 +1602,9 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
                goto check_next_scan;
        }
 
+       /* Check csa channel expiry before parsing scan response */
+       mwifiex_11h_get_csa_closed_channel(priv);
+
        bytes_left = le16_to_cpu(scan_rsp->bss_descript_size);
        dev_dbg(adapter->dev, "info: SCAN_RESP: bss_descript_size %d\n",
                bytes_left);
@@ -1727,6 +1757,13 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
                        struct ieee80211_channel *chan;
                        u8 band;
 
+                       /* Skip entry if on csa closed channel */
+                       if (channel == priv->csa_chan) {
+                               dev_dbg(adapter->dev,
+                                       "Dropping entry on csa closed channel\n");
+                               continue;
+                       }
+
                        band = BAND_G;
                        if (chan_band_tlv) {
                                chan_band =
@@ -1784,22 +1821,17 @@ check_next_scan:
                if (priv->report_scan_result)
                        priv->report_scan_result = false;
 
-               if (priv->user_scan_cfg) {
-                       if (priv->scan_request) {
-                               dev_dbg(priv->adapter->dev,
-                                       "info: notifying scan done\n");
-                               cfg80211_scan_done(priv->scan_request, 0);
-                               priv->scan_request = NULL;
-                       } else {
-                               dev_dbg(priv->adapter->dev,
-                                       "info: scan already aborted\n");
-                       }
-
-                       kfree(priv->user_scan_cfg);
-                       priv->user_scan_cfg = NULL;
+               if (priv->scan_request) {
+                       dev_dbg(adapter->dev, "info: notifying scan done\n");
+                       cfg80211_scan_done(priv->scan_request, 0);
+                       priv->scan_request = NULL;
+               } else {
+                       priv->scan_aborting = false;
+                       dev_dbg(adapter->dev, "info: scan already aborted\n");
                }
        } else {
-               if (priv->user_scan_cfg && !priv->scan_request) {
+               if ((priv->scan_aborting && !priv->scan_request) ||
+                   priv->scan_block) {
                        spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
                                               flags);
                        adapter->scan_delay_cnt = MWIFIEX_MAX_SCAN_DELAY_CNT;
index 363ba31b58bf057fbd305007a70aba9ffea82978..5ee5ed02eccd56645c94f8b4972962130f5b305b 100644 (file)
@@ -77,6 +77,17 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
 
        func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
 
+       if (id->driver_data) {
+               struct mwifiex_sdio_device *data = (void *)id->driver_data;
+
+               card->firmware = data->firmware;
+               card->reg = data->reg;
+               card->max_ports = data->max_ports;
+               card->mp_agg_pkt_limit = data->mp_agg_pkt_limit;
+               card->supports_sdio_new_mode = data->supports_sdio_new_mode;
+               card->has_control_mask = data->has_control_mask;
+       }
+
        sdio_claim_host(func);
        ret = sdio_enable_func(func);
        sdio_release_host(func);
@@ -251,12 +262,19 @@ static int mwifiex_sdio_resume(struct device *dev)
 #define SDIO_DEVICE_ID_MARVELL_8787   (0x9119)
 /* Device ID for SD8797 */
 #define SDIO_DEVICE_ID_MARVELL_8797   (0x9129)
+/* Device ID for SD8897 */
+#define SDIO_DEVICE_ID_MARVELL_8897   (0x912d)
 
 /* WLAN IDs */
 static const struct sdio_device_id mwifiex_ids[] = {
-       {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8786)},
-       {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8787)},
-       {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797)},
+       {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8786),
+               .driver_data = (unsigned long) &mwifiex_sdio_sd8786},
+       {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8787),
+               .driver_data = (unsigned long) &mwifiex_sdio_sd8787},
+       {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797),
+               .driver_data = (unsigned long) &mwifiex_sdio_sd8797},
+       {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8897),
+               .driver_data = (unsigned long) &mwifiex_sdio_sd8897},
        {},
 };
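Each entry of mwifiex_ids now carries a pointer to a per-chipset description through .driver_data, which mwifiex_sdio_probe() copies into the card (firmware name, register map, port count, new-mode flags). A standalone sketch of that table-driven dispatch; the config struct and its field values are stand-ins, not the driver's actual mwifiex_sdio_device definitions:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical per-chipset description, standing in for mwifiex_sdio_device. */
struct chip_cfg {
	const char *firmware;
	unsigned max_ports;
	int supports_new_mode;
};

static const struct chip_cfg cfg_8787 = { "sd8787_uapsta.bin", 16, 0 };
static const struct chip_cfg cfg_8897 = { "sd8897_uapsta.bin", 32, 1 };

struct dev_id {
	uint16_t device;
	const struct chip_cfg *driver_data;	/* per-entry configuration */
};

static const struct dev_id id_table[] = {
	{ 0x9119, &cfg_8787 },	/* SD8787 */
	{ 0x912d, &cfg_8897 },	/* SD8897 */
	{ 0, NULL }
};

/* probe: find the matched id and hand back its per-chipset configuration. */
static const struct chip_cfg *probe(uint16_t device)
{
	for (const struct dev_id *id = id_table; id->device; id++)
		if (id->device == device)
			return id->driver_data;
	return NULL;
}

int main(void)
{
	const struct chip_cfg *cfg = probe(0x912d);

	if (cfg)
		printf("fw=%s ports=%u new_mode=%d\n",
		       cfg->firmware, cfg->max_ports, cfg->supports_new_mode);
	return 0;
}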
 
@@ -282,13 +300,13 @@ static struct sdio_driver mwifiex_sdio = {
  * This function writes data into SDIO card register.
  */
 static int
-mwifiex_write_reg(struct mwifiex_adapter *adapter, u32 reg, u32 data)
+mwifiex_write_reg(struct mwifiex_adapter *adapter, u32 reg, u8 data)
 {
        struct sdio_mmc_card *card = adapter->card;
        int ret = -1;
 
        sdio_claim_host(card->func);
-       sdio_writeb(card->func, (u8) data, reg, &ret);
+       sdio_writeb(card->func, data, reg, &ret);
        sdio_release_host(card->func);
 
        return ret;
@@ -298,7 +316,7 @@ mwifiex_write_reg(struct mwifiex_adapter *adapter, u32 reg, u32 data)
  * This function reads data from SDIO card register.
  */
 static int
-mwifiex_read_reg(struct mwifiex_adapter *adapter, u32 reg, u32 *data)
+mwifiex_read_reg(struct mwifiex_adapter *adapter, u32 reg, u8 *data)
 {
        struct sdio_mmc_card *card = adapter->card;
        int ret = -1;
@@ -400,7 +418,40 @@ static int mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter)
 }
 
 /*
- * This function initializes the IO ports.
+ * This function is used to initialize IO ports for the
+ * chipsets supporting SDIO new mode eg SD8897.
+ */
+static int mwifiex_init_sdio_new_mode(struct mwifiex_adapter *adapter)
+{
+       u8 reg;
+
+       adapter->ioport = MEM_PORT;
+
+       /* enable sdio new mode */
+       if (mwifiex_read_reg(adapter, CARD_CONFIG_2_1_REG, &reg))
+               return -1;
+       if (mwifiex_write_reg(adapter, CARD_CONFIG_2_1_REG,
+                             reg | CMD53_NEW_MODE))
+               return -1;
+
+       /* Configure cmd port and enable reading rx length from the register */
+       if (mwifiex_read_reg(adapter, CMD_CONFIG_0, &reg))
+               return -1;
+       if (mwifiex_write_reg(adapter, CMD_CONFIG_0, reg | CMD_PORT_RD_LEN_EN))
+               return -1;
+
+       /* Enable Dnld/Upld ready auto reset for cmd port after cmd53 is
+        * completed
+        */
+       if (mwifiex_read_reg(adapter, CMD_CONFIG_1, &reg))
+               return -1;
+       if (mwifiex_write_reg(adapter, CMD_CONFIG_1, reg | CMD_PORT_AUTO_EN))
+               return -1;
+
+       return 0;
+}
+
+/* This function initializes the IO ports.
  *
  * The following operations are performed -
  *      - Read the IO ports (0, 1 and 2)
@@ -409,10 +460,17 @@ static int mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter)
  */
 static int mwifiex_init_sdio_ioport(struct mwifiex_adapter *adapter)
 {
-       u32 reg;
+       u8 reg;
+       struct sdio_mmc_card *card = adapter->card;
 
        adapter->ioport = 0;
 
+       if (card->supports_sdio_new_mode) {
+               if (mwifiex_init_sdio_new_mode(adapter))
+                       return -1;
+               goto cont;
+       }
+
        /* Read the IO port */
        if (!mwifiex_read_reg(adapter, IO_PORT_0_REG, &reg))
                adapter->ioport |= (reg & 0xff);
@@ -428,19 +486,19 @@ static int mwifiex_init_sdio_ioport(struct mwifiex_adapter *adapter)
                adapter->ioport |= ((reg & 0xff) << 16);
        else
                return -1;
-
+cont:
        pr_debug("info: SDIO FUNC1 IO port: %#x\n", adapter->ioport);
 
        /* Set Host interrupt reset to read to clear */
        if (!mwifiex_read_reg(adapter, HOST_INT_RSR_REG, &reg))
                mwifiex_write_reg(adapter, HOST_INT_RSR_REG,
-                                 reg | SDIO_INT_MASK);
+                                 reg | card->reg->sdio_int_mask);
        else
                return -1;
 
        /* Dnld/Upld ready set to auto reset */
-       if (!mwifiex_read_reg(adapter, CARD_MISC_CFG_REG, &reg))
-               mwifiex_write_reg(adapter, CARD_MISC_CFG_REG,
+       if (!mwifiex_read_reg(adapter, card->reg->card_misc_cfg_reg, &reg))
+               mwifiex_write_reg(adapter, card->reg->card_misc_cfg_reg,
                                  reg | AUTO_RE_ENABLE_INT);
        else
                return -1;
@@ -486,34 +544,42 @@ static int mwifiex_write_data_to_card(struct mwifiex_adapter *adapter,
 static int mwifiex_get_rd_port(struct mwifiex_adapter *adapter, u8 *port)
 {
        struct sdio_mmc_card *card = adapter->card;
-       u16 rd_bitmap = card->mp_rd_bitmap;
+       const struct mwifiex_sdio_card_reg *reg = card->reg;
+       u32 rd_bitmap = card->mp_rd_bitmap;
 
-       dev_dbg(adapter->dev, "data: mp_rd_bitmap=0x%04x\n", rd_bitmap);
+       dev_dbg(adapter->dev, "data: mp_rd_bitmap=0x%08x\n", rd_bitmap);
 
-       if (!(rd_bitmap & (CTRL_PORT_MASK | DATA_PORT_MASK)))
-               return -1;
+       if (card->supports_sdio_new_mode) {
+               if (!(rd_bitmap & reg->data_port_mask))
+                       return -1;
+       } else {
+               if (!(rd_bitmap & (CTRL_PORT_MASK | reg->data_port_mask)))
+                       return -1;
+       }
 
-       if (card->mp_rd_bitmap & CTRL_PORT_MASK) {
-               card->mp_rd_bitmap &= (u16) (~CTRL_PORT_MASK);
+       if ((card->has_control_mask) &&
+           (card->mp_rd_bitmap & CTRL_PORT_MASK)) {
+               card->mp_rd_bitmap &= (u32) (~CTRL_PORT_MASK);
                *port = CTRL_PORT;
-               dev_dbg(adapter->dev, "data: port=%d mp_rd_bitmap=0x%04x\n",
+               dev_dbg(adapter->dev, "data: port=%d mp_rd_bitmap=0x%08x\n",
                        *port, card->mp_rd_bitmap);
-       } else {
-               if (card->mp_rd_bitmap & (1 << card->curr_rd_port)) {
-                       card->mp_rd_bitmap &= (u16)
-                                               (~(1 << card->curr_rd_port));
-                       *port = card->curr_rd_port;
+               return 0;
+       }
 
-                       if (++card->curr_rd_port == MAX_PORT)
-                               card->curr_rd_port = 1;
-               } else {
-                       return -1;
-               }
+       if (!(card->mp_rd_bitmap & (1 << card->curr_rd_port)))
+               return -1;
+
+       /* We are now handling the SDIO data ports */
+       card->mp_rd_bitmap &= (u32)(~(1 << card->curr_rd_port));
+       *port = card->curr_rd_port;
+
+       if (++card->curr_rd_port == card->max_ports)
+               card->curr_rd_port = reg->start_rd_port;
+
+       dev_dbg(adapter->dev,
+               "data: port=%d mp_rd_bitmap=0x%08x -> 0x%08x\n",
+               *port, rd_bitmap, card->mp_rd_bitmap);
 
-               dev_dbg(adapter->dev,
-                       "data: port=%d mp_rd_bitmap=0x%04x -> 0x%04x\n",
-                       *port, rd_bitmap, card->mp_rd_bitmap);
-       }
        return 0;
 }
 
@@ -524,35 +590,45 @@ static int mwifiex_get_rd_port(struct mwifiex_adapter *adapter, u8 *port)
  * increased (provided it does not reach the maximum limit, in which
  * case it is reset to 1)
  */
-static int mwifiex_get_wr_port_data(struct mwifiex_adapter *adapter, u8 *port)
+static int mwifiex_get_wr_port_data(struct mwifiex_adapter *adapter, u32 *port)
 {
        struct sdio_mmc_card *card = adapter->card;
-       u16 wr_bitmap = card->mp_wr_bitmap;
+       const struct mwifiex_sdio_card_reg *reg = card->reg;
+       u32 wr_bitmap = card->mp_wr_bitmap;
 
-       dev_dbg(adapter->dev, "data: mp_wr_bitmap=0x%04x\n", wr_bitmap);
+       dev_dbg(adapter->dev, "data: mp_wr_bitmap=0x%08x\n", wr_bitmap);
 
-       if (!(wr_bitmap & card->mp_data_port_mask))
+       if (card->supports_sdio_new_mode &&
+           !(wr_bitmap & reg->data_port_mask)) {
+               adapter->data_sent = true;
+               return -EBUSY;
+       } else if (!card->supports_sdio_new_mode &&
+                  !(wr_bitmap & card->mp_data_port_mask)) {
                return -1;
+       }
 
        if (card->mp_wr_bitmap & (1 << card->curr_wr_port)) {
-               card->mp_wr_bitmap &= (u16) (~(1 << card->curr_wr_port));
+               card->mp_wr_bitmap &= (u32) (~(1 << card->curr_wr_port));
                *port = card->curr_wr_port;
-               if (++card->curr_wr_port == card->mp_end_port)
-                       card->curr_wr_port = 1;
+               if (((card->supports_sdio_new_mode) &&
+                    (++card->curr_wr_port == card->max_ports)) ||
+                   ((!card->supports_sdio_new_mode) &&
+                    (++card->curr_wr_port == card->mp_end_port)))
+                       card->curr_wr_port = reg->start_wr_port;
        } else {
                adapter->data_sent = true;
                return -EBUSY;
        }
 
-       if (*port == CTRL_PORT) {
-               dev_err(adapter->dev, "invalid data port=%d cur port=%d"
-                       " mp_wr_bitmap=0x%04x -> 0x%04x\n",
+       if ((card->has_control_mask) && (*port == CTRL_PORT)) {
+               dev_err(adapter->dev,
+                       "invalid data port=%d cur port=%d mp_wr_bitmap=0x%08x -> 0x%08x\n",
                        *port, card->curr_wr_port, wr_bitmap,
                        card->mp_wr_bitmap);
                return -1;
        }
 
-       dev_dbg(adapter->dev, "data: port=%d mp_wr_bitmap=0x%04x -> 0x%04x\n",
+       dev_dbg(adapter->dev, "data: port=%d mp_wr_bitmap=0x%08x -> 0x%08x\n",
                *port, wr_bitmap, card->mp_wr_bitmap);
 
        return 0;
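Both port allocators above follow the same pattern: claim curr_port only if its bit is still set in the bitmap reported by firmware, clear that bit, hand the port back, and advance curr_port with wrap-around (to max_ports on new-mode chips, to mp_end_port otherwise). A standalone sketch of that claim-and-advance step over a 32-bit bitmap; the start and wrap values are assumed:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define START_PORT	0	/* assumed; reg->start_wr_port in the driver */
#define WRAP_PORT	16	/* assumed wrap point (max_ports / mp_end_port) */

/* Claim curr_port if firmware marked it usable, clear its bit, advance. */
static int claim_port(uint32_t *bitmap, uint32_t *curr, uint32_t *port)
{
	if (!(*bitmap & (1u << *curr)))
		return -1;	/* no buffer available on the current port yet */

	*bitmap &= ~(1u << *curr);
	*port = *curr;
	if (++(*curr) == WRAP_PORT)
		*curr = START_PORT;
	return 0;
}

int main(void)
{
	uint32_t bitmap = 0x0000000e;	/* firmware says ports 1..3 are ready */
	uint32_t curr = 1, port;

	while (claim_port(&bitmap, &curr, &port) == 0)
		printf("claimed port %" PRIu32 ", bitmap now 0x%08" PRIx32 "\n",
		       port, bitmap);
	return 0;
}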
@@ -564,11 +640,12 @@ static int mwifiex_get_wr_port_data(struct mwifiex_adapter *adapter, u8 *port)
 static int
 mwifiex_sdio_poll_card_status(struct mwifiex_adapter *adapter, u8 bits)
 {
+       struct sdio_mmc_card *card = adapter->card;
        u32 tries;
-       u32 cs;
+       u8 cs;
 
        for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
-               if (mwifiex_read_reg(adapter, CARD_STATUS_REG, &cs))
+               if (mwifiex_read_reg(adapter, card->reg->poll_reg, &cs))
                        break;
                else if ((cs & bits) == bits)
                        return 0;
@@ -587,12 +664,14 @@ mwifiex_sdio_poll_card_status(struct mwifiex_adapter *adapter, u8 bits)
 static int
 mwifiex_sdio_read_fw_status(struct mwifiex_adapter *adapter, u16 *dat)
 {
-       u32 fws0, fws1;
+       struct sdio_mmc_card *card = adapter->card;
+       const struct mwifiex_sdio_card_reg *reg = card->reg;
+       u8 fws0, fws1;
 
-       if (mwifiex_read_reg(adapter, CARD_FW_STATUS0_REG, &fws0))
+       if (mwifiex_read_reg(adapter, reg->status_reg_0, &fws0))
                return -1;
 
-       if (mwifiex_read_reg(adapter, CARD_FW_STATUS1_REG, &fws1))
+       if (mwifiex_read_reg(adapter, reg->status_reg_1, &fws1))
                return -1;
 
        *dat = (u16) ((fws1 << 8) | fws0);
@@ -608,14 +687,14 @@ mwifiex_sdio_read_fw_status(struct mwifiex_adapter *adapter, u16 *dat)
  */
 static int mwifiex_sdio_disable_host_int(struct mwifiex_adapter *adapter)
 {
-       u32 host_int_mask;
+       u8 host_int_mask, host_int_disable = HOST_INT_DISABLE;
 
        /* Read back the host_int_mask register */
        if (mwifiex_read_reg(adapter, HOST_INT_MASK_REG, &host_int_mask))
                return -1;
 
        /* Update with the mask and write back to the register */
-       host_int_mask &= ~HOST_INT_DISABLE;
+       host_int_mask &= ~host_int_disable;
 
        if (mwifiex_write_reg(adapter, HOST_INT_MASK_REG, host_int_mask)) {
                dev_err(adapter->dev, "disable host interrupt failed\n");
@@ -633,8 +712,11 @@ static int mwifiex_sdio_disable_host_int(struct mwifiex_adapter *adapter)
  */
 static int mwifiex_sdio_enable_host_int(struct mwifiex_adapter *adapter)
 {
+       struct sdio_mmc_card *card = adapter->card;
+
        /* Simply write the mask to the register */
-       if (mwifiex_write_reg(adapter, HOST_INT_MASK_REG, HOST_INT_ENABLE)) {
+       if (mwifiex_write_reg(adapter, HOST_INT_MASK_REG,
+                             card->reg->host_int_enable)) {
                dev_err(adapter->dev, "enable host interrupt failed\n");
                return -1;
        }
@@ -686,11 +768,13 @@ static int mwifiex_sdio_card_to_host(struct mwifiex_adapter *adapter,
 static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
                                    struct mwifiex_fw_image *fw)
 {
+       struct sdio_mmc_card *card = adapter->card;
+       const struct mwifiex_sdio_card_reg *reg = card->reg;
        int ret;
        u8 *firmware = fw->fw_buf;
        u32 firmware_len = fw->fw_len;
        u32 offset = 0;
-       u32 base0, base1;
+       u8 base0, base1;
        u8 *fwbuf;
        u16 len = 0;
        u32 txlen, tx_blocks = 0, tries;
@@ -727,7 +811,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
                        break;
 
                for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
-                       ret = mwifiex_read_reg(adapter, HOST_F1_RD_BASE_0,
+                       ret = mwifiex_read_reg(adapter, reg->base_0_reg,
                                               &base0);
                        if (ret) {
                                dev_err(adapter->dev,
@@ -736,7 +820,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
                                        base0, base0);
                                goto done;
                        }
-                       ret = mwifiex_read_reg(adapter, HOST_F1_RD_BASE_1,
+                       ret = mwifiex_read_reg(adapter, reg->base_1_reg,
                                               &base1);
                        if (ret) {
                                dev_err(adapter->dev,
@@ -828,10 +912,11 @@ done:
 static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
                                   u32 poll_num)
 {
+       struct sdio_mmc_card *card = adapter->card;
        int ret = 0;
        u16 firmware_stat;
        u32 tries;
-       u32 winner_status;
+       u8 winner_status;
 
        /* Wait for firmware initialization event */
        for (tries = 0; tries < poll_num; tries++) {
@@ -849,7 +934,7 @@ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
 
        if (ret) {
                if (mwifiex_read_reg
-                   (adapter, CARD_FW_STATUS0_REG, &winner_status))
+                   (adapter, card->reg->status_reg_0, &winner_status))
                        winner_status = 0;
 
                if (winner_status)
@@ -866,12 +951,12 @@ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
 static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
 {
        struct sdio_mmc_card *card = adapter->card;
-       u32 sdio_ireg;
+       u8 sdio_ireg;
        unsigned long flags;
 
-       if (mwifiex_read_data_sync(adapter, card->mp_regs, MAX_MP_REGS,
-                                  REG_PORT | MWIFIEX_SDIO_BYTE_MODE_MASK,
-                                  0)) {
+       if (mwifiex_read_data_sync(adapter, card->mp_regs,
+                                  card->reg->max_mp_regs,
+                                  REG_PORT | MWIFIEX_SDIO_BYTE_MODE_MASK, 0)) {
                dev_err(adapter->dev, "read mp_regs failed\n");
                return;
        }
@@ -880,6 +965,9 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
        if (sdio_ireg) {
                /*
                 * DN_LD_HOST_INT_STATUS and/or UP_LD_HOST_INT_STATUS
+                * For SDIO new mode CMD port interrupts
+                *      DN_LD_CMD_PORT_HOST_INT_STATUS and/or
+                *      UP_LD_CMD_PORT_HOST_INT_STATUS
                 * Clear the interrupt status register
                 */
                dev_dbg(adapter->dev, "int: sdio_ireg = %#x\n", sdio_ireg);
@@ -1003,11 +1091,11 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
        s32 f_aggr_cur = 0;
        struct sk_buff *skb_deaggr;
        u32 pind;
-       u32 pkt_len, pkt_type = 0;
+       u32 pkt_len, pkt_type, mport;
        u8 *curr_ptr;
        u32 rx_len = skb->len;
 
-       if (port == CTRL_PORT) {
+       if ((card->has_control_mask) && (port == CTRL_PORT)) {
                /* Read the command Resp without aggr */
                dev_dbg(adapter->dev, "info: %s: no aggregation for cmd "
                        "response\n", __func__);
@@ -1024,7 +1112,10 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
                goto rx_curr_single;
        }
 
-       if (card->mp_rd_bitmap & (~((u16) CTRL_PORT_MASK))) {
+       if ((!card->has_control_mask && (card->mp_rd_bitmap &
+                                        card->reg->data_port_mask)) ||
+           (card->has_control_mask && (card->mp_rd_bitmap &
+                                       (~((u32) CTRL_PORT_MASK))))) {
                /* Some more data RX pending */
                dev_dbg(adapter->dev, "info: %s: not last packet\n", __func__);
 
@@ -1060,10 +1151,10 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
        if (f_aggr_cur) {
                dev_dbg(adapter->dev, "info: current packet aggregation\n");
                /* Curr pkt can be aggregated */
-               MP_RX_AGGR_SETUP(card, skb, port);
+               mp_rx_aggr_setup(card, skb, port);
 
                if (MP_RX_AGGR_PKT_LIMIT_REACHED(card) ||
-                   MP_RX_AGGR_PORT_LIMIT_REACHED(card)) {
+                   mp_rx_aggr_port_limit_reached(card)) {
                        dev_dbg(adapter->dev, "info: %s: aggregated packet "
                                "limit reached\n", __func__);
                        /* No more pkts allowed in Aggr buf, rx it */
@@ -1076,11 +1167,28 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
                dev_dbg(adapter->dev, "info: do_rx_aggr: num of packets: %d\n",
                        card->mpa_rx.pkt_cnt);
 
+               if (card->supports_sdio_new_mode) {
+                       int i;
+                       u32 port_count;
+
+                       for (i = 0, port_count = 0; i < card->max_ports; i++)
+                               if (card->mpa_rx.ports & BIT(i))
+                                       port_count++;
+
+                       /* Reading data from "start_port + 0" to "start_port +
+                        * port_count -1", so decrease the count by 1
+                        */
+                       port_count--;
+                       mport = (adapter->ioport | SDIO_MPA_ADDR_BASE |
+                                (port_count << 8)) + card->mpa_rx.start_port;
+               } else {
+                       mport = (adapter->ioport | SDIO_MPA_ADDR_BASE |
+                                (card->mpa_rx.ports << 4)) +
+                                card->mpa_rx.start_port;
+               }
+
                if (mwifiex_read_data_sync(adapter, card->mpa_rx.buf,
-                                          card->mpa_rx.buf_len,
-                                          (adapter->ioport | 0x1000 |
-                                           (card->mpa_rx.ports << 4)) +
-                                          card->mpa_rx.start_port, 1))
+                                          card->mpa_rx.buf_len, mport, 1))
                        goto error;
 
                curr_ptr = card->mpa_rx.buf;
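The aggregated read above folds the whole port span into one transfer address: new-mode chips encode (port count - 1) at bit 8, legacy chips keep the 4-bit port bitmap at bit 4, and both add the starting port on top of the ioport and the 0x1000 aggregation base that the removed lines used literally. A standalone sketch of the two encodings, with example ioport and bitmap values only:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define SDIO_MPA_ADDR_BASE	0x1000	/* the literal the replaced lines used */

/* New mode: (number of aggregated ports - 1) is encoded at bit 8. */
static uint32_t mport_new_mode(uint32_t ioport, uint32_t ports_bitmap,
			       uint32_t start_port, unsigned max_ports)
{
	uint32_t port_count = 0;

	for (unsigned i = 0; i < max_ports; i++)
		if (ports_bitmap & (1u << i))
			port_count++;

	return (ioport | SDIO_MPA_ADDR_BASE | ((port_count - 1) << 8)) +
	       start_port;
}

/* Legacy mode: the raw port bitmap itself is encoded at bit 4. */
static uint32_t mport_legacy(uint32_t ioport, uint32_t ports_bitmap,
			     uint32_t start_port)
{
	return (ioport | SDIO_MPA_ADDR_BASE | (ports_bitmap << 4)) + start_port;
}

int main(void)
{
	/* Example only: three aggregated packets starting at port 2. */
	printf("new mode:    0x%08" PRIx32 "\n",
	       mport_new_mode(0x10000, 0x1c, 2, 32));
	printf("legacy mode: 0x%08" PRIx32 "\n",
	       mport_legacy(0x8, 0x7, 2));
	return 0;
}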
@@ -1167,6 +1275,7 @@ error:
 static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
 {
        struct sdio_mmc_card *card = adapter->card;
+       const struct mwifiex_sdio_card_reg *reg = card->reg;
        int ret = 0;
        u8 sdio_ireg;
        struct sk_buff *skb;
@@ -1175,6 +1284,8 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
        u32 rx_blocks;
        u16 rx_len;
        unsigned long flags;
+       u32 bitmap;
+       u8 cr;
 
        spin_lock_irqsave(&adapter->int_lock, flags);
        sdio_ireg = adapter->int_status;
@@ -1184,10 +1295,60 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
        if (!sdio_ireg)
                return ret;
 
+       /* Following interrupt is only for SDIO new mode */
+       if (sdio_ireg & DN_LD_CMD_PORT_HOST_INT_STATUS && adapter->cmd_sent)
+               adapter->cmd_sent = false;
+
+       /* Following interrupt is only for SDIO new mode */
+       if (sdio_ireg & UP_LD_CMD_PORT_HOST_INT_STATUS) {
+               u32 pkt_type;
+
+               /* read the len of control packet */
+               rx_len = card->mp_regs[CMD_RD_LEN_1] << 8;
+               rx_len |= (u16) card->mp_regs[CMD_RD_LEN_0];
+               rx_blocks = DIV_ROUND_UP(rx_len, MWIFIEX_SDIO_BLOCK_SIZE);
+               if (rx_len <= INTF_HEADER_LEN ||
+                   (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE) >
+                    MWIFIEX_RX_DATA_BUF_SIZE)
+                       return -1;
+               rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE);
+
+               skb = dev_alloc_skb(rx_len);
+               if (!skb)
+                       return -1;
+
+               skb_put(skb, rx_len);
+
+               if (mwifiex_sdio_card_to_host(adapter, &pkt_type, skb->data,
+                                             skb->len, adapter->ioport |
+                                                       CMD_PORT_SLCT)) {
+                       dev_err(adapter->dev,
+                               "%s: failed to card_to_host", __func__);
+                       dev_kfree_skb_any(skb);
+                       goto term_cmd;
+               }
+
+               if ((pkt_type != MWIFIEX_TYPE_CMD) &&
+                   (pkt_type != MWIFIEX_TYPE_EVENT))
+                       dev_err(adapter->dev,
+                               "%s:Received wrong packet on cmd port",
+                               __func__);
+
+               mwifiex_decode_rx_packet(adapter, skb, pkt_type);
+       }
+
        if (sdio_ireg & DN_LD_HOST_INT_STATUS) {
-               card->mp_wr_bitmap = ((u16) card->mp_regs[WR_BITMAP_U]) << 8;
-               card->mp_wr_bitmap |= (u16) card->mp_regs[WR_BITMAP_L];
-               dev_dbg(adapter->dev, "int: DNLD: wr_bitmap=0x%04x\n",
+               bitmap = (u32) card->mp_regs[reg->wr_bitmap_l];
+               bitmap |= ((u32) card->mp_regs[reg->wr_bitmap_u]) << 8;
+               if (card->supports_sdio_new_mode) {
+                       bitmap |=
+                               ((u32) card->mp_regs[reg->wr_bitmap_1l]) << 16;
+                       bitmap |=
+                               ((u32) card->mp_regs[reg->wr_bitmap_1u]) << 24;
+               }
+               card->mp_wr_bitmap = bitmap;
+
+               dev_dbg(adapter->dev, "int: DNLD: wr_bitmap=0x%x\n",
                        card->mp_wr_bitmap);
                if (adapter->data_sent &&
                    (card->mp_wr_bitmap & card->mp_data_port_mask)) {
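The wider bitmaps above are assembled directly from the interrupt register block: the low 16 bits come from wr_bitmap_l/wr_bitmap_u as before, and on chips that support the new mode two extra registers (wr_bitmap_1l/wr_bitmap_1u) are shifted into bits 16-31. A standalone sketch of that assembly; the register offsets below are placeholders, not the real map:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Combine four 8-bit registers into one 32-bit port bitmap. */
static uint32_t read_bitmap(const uint8_t *regs, unsigned lo, unsigned hi,
			    unsigned lo1, unsigned hi1, int new_mode)
{
	uint32_t bitmap = (uint32_t)regs[lo] | ((uint32_t)regs[hi] << 8);

	if (new_mode) {
		bitmap |= (uint32_t)regs[lo1] << 16;
		bitmap |= (uint32_t)regs[hi1] << 24;
	}
	return bitmap;
}

int main(void)
{
	/* Fake register block; offsets 4..7 are placeholders, not the real map. */
	uint8_t regs[8] = { 0, 0, 0, 0, 0x0e, 0x00, 0x01, 0x80 };

	printf("legacy:   0x%08" PRIx32 "\n", read_bitmap(regs, 4, 5, 6, 7, 0));
	printf("new mode: 0x%08" PRIx32 "\n", read_bitmap(regs, 4, 5, 6, 7, 1));
	return 0;
}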
@@ -1200,11 +1361,11 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
        /* As firmware will not generate download ready interrupt if the port
           updated is command port only, cmd_sent should be done for any SDIO
           interrupt. */
-       if (adapter->cmd_sent) {
+       if (card->has_control_mask && adapter->cmd_sent) {
                /* Check if firmware has attach buffer at command port and
                   update just that in wr_bit_map. */
                card->mp_wr_bitmap |=
-                       (u16) card->mp_regs[WR_BITMAP_L] & CTRL_PORT_MASK;
+                       (u32) card->mp_regs[reg->wr_bitmap_l] & CTRL_PORT_MASK;
                if (card->mp_wr_bitmap & CTRL_PORT_MASK)
                        adapter->cmd_sent = false;
        }
@@ -1212,9 +1373,16 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
        dev_dbg(adapter->dev, "info: cmd_sent=%d data_sent=%d\n",
                adapter->cmd_sent, adapter->data_sent);
        if (sdio_ireg & UP_LD_HOST_INT_STATUS) {
-               card->mp_rd_bitmap = ((u16) card->mp_regs[RD_BITMAP_U]) << 8;
-               card->mp_rd_bitmap |= (u16) card->mp_regs[RD_BITMAP_L];
-               dev_dbg(adapter->dev, "int: UPLD: rd_bitmap=0x%04x\n",
+               bitmap = (u32) card->mp_regs[reg->rd_bitmap_l];
+               bitmap |= ((u32) card->mp_regs[reg->rd_bitmap_u]) << 8;
+               if (card->supports_sdio_new_mode) {
+                       bitmap |=
+                               ((u32) card->mp_regs[reg->rd_bitmap_1l]) << 16;
+                       bitmap |=
+                               ((u32) card->mp_regs[reg->rd_bitmap_1u]) << 24;
+               }
+               card->mp_rd_bitmap = bitmap;
+               dev_dbg(adapter->dev, "int: UPLD: rd_bitmap=0x%x\n",
                        card->mp_rd_bitmap);
 
                while (true) {
@@ -1224,8 +1392,8 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
                                        "info: no more rd_port available\n");
                                break;
                        }
-                       len_reg_l = RD_LEN_P0_L + (port << 1);
-                       len_reg_u = RD_LEN_P0_U + (port << 1);
+                       len_reg_l = reg->rd_len_p0_l + (port << 1);
+                       len_reg_u = reg->rd_len_p0_u + (port << 1);
                        rx_len = ((u16) card->mp_regs[len_reg_u]) << 8;
                        rx_len |= (u16) card->mp_regs[len_reg_l];
                        dev_dbg(adapter->dev, "info: RX: port=%d rx_len=%u\n",
@@ -1257,37 +1425,33 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
 
                        if (mwifiex_sdio_card_to_host_mp_aggr(adapter, skb,
                                                              port)) {
-                               u32 cr = 0;
-
                                dev_err(adapter->dev, "card_to_host_mpa failed:"
                                        " int status=%#x\n", sdio_ireg);
-                               if (mwifiex_read_reg(adapter,
-                                                    CONFIGURATION_REG, &cr))
-                                       dev_err(adapter->dev,
-                                               "read CFG reg failed\n");
-
-                               dev_dbg(adapter->dev,
-                                       "info: CFG reg val = %d\n", cr);
-                               if (mwifiex_write_reg(adapter,
-                                                     CONFIGURATION_REG,
-                                                     (cr | 0x04)))
-                                       dev_err(adapter->dev,
-                                               "write CFG reg failed\n");
-
-                               dev_dbg(adapter->dev, "info: write success\n");
-                               if (mwifiex_read_reg(adapter,
-                                                    CONFIGURATION_REG, &cr))
-                                       dev_err(adapter->dev,
-                                               "read CFG reg failed\n");
-
-                               dev_dbg(adapter->dev,
-                                       "info: CFG reg val =%x\n", cr);
-                               return -1;
+                               goto term_cmd;
                        }
                }
        }
 
        return 0;
+
+term_cmd:
+       /* terminate cmd */
+       if (mwifiex_read_reg(adapter, CONFIGURATION_REG, &cr))
+               dev_err(adapter->dev, "read CFG reg failed\n");
+       else
+               dev_dbg(adapter->dev, "info: CFG reg val = %d\n", cr);
+
+       if (mwifiex_write_reg(adapter, CONFIGURATION_REG, (cr | 0x04)))
+               dev_err(adapter->dev, "write CFG reg failed\n");
+       else
+               dev_dbg(adapter->dev, "info: write success\n");
+
+       if (mwifiex_read_reg(adapter, CONFIGURATION_REG, &cr))
+               dev_err(adapter->dev, "read CFG reg failed\n");
+       else
+               dev_dbg(adapter->dev, "info: CFG reg val =%x\n", cr);
+
+       return -1;
 }
 
 /*
@@ -1305,7 +1469,7 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
  * and return.
  */
 static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
-                                       u8 *payload, u32 pkt_len, u8 port,
+                                       u8 *payload, u32 pkt_len, u32 port,
                                        u32 next_pkt_len)
 {
        struct sdio_mmc_card *card = adapter->card;
@@ -1314,8 +1478,11 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
        s32 f_send_cur_buf = 0;
        s32 f_precopy_cur_buf = 0;
        s32 f_postcopy_cur_buf = 0;
+       u32 mport;
 
-       if ((!card->mpa_tx.enabled) || (port == CTRL_PORT)) {
+       if (!card->mpa_tx.enabled ||
+           (card->has_control_mask && (port == CTRL_PORT)) ||
+           (card->supports_sdio_new_mode && (port == CMD_PORT_SLCT))) {
                dev_dbg(adapter->dev, "info: %s: tx aggregation disabled\n",
                        __func__);
 
@@ -1329,7 +1496,7 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
                        __func__);
 
                if (MP_TX_AGGR_IN_PROGRESS(card)) {
-                       if (!MP_TX_AGGR_PORT_LIMIT_REACHED(card) &&
+                       if (!mp_tx_aggr_port_limit_reached(card) &&
                            MP_TX_AGGR_BUF_HAS_ROOM(card, pkt_len)) {
                                f_precopy_cur_buf = 1;
 
@@ -1342,7 +1509,7 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
                                /* No room in Aggr buf, send it */
                                f_send_aggr_buf = 1;
 
-                               if (MP_TX_AGGR_PORT_LIMIT_REACHED(card) ||
+                               if (mp_tx_aggr_port_limit_reached(card) ||
                                    !(card->mp_wr_bitmap &
                                      (1 << card->curr_wr_port)))
                                        f_send_cur_buf = 1;
@@ -1381,7 +1548,7 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
                MP_TX_AGGR_BUF_PUT(card, payload, pkt_len, port);
 
                if (MP_TX_AGGR_PKT_LIMIT_REACHED(card) ||
-                   MP_TX_AGGR_PORT_LIMIT_REACHED(card))
+                   mp_tx_aggr_port_limit_reached(card))
                        /* No more pkts allowed in Aggr buf, send it */
                        f_send_aggr_buf = 1;
        }
@@ -1390,11 +1557,28 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
                dev_dbg(adapter->dev, "data: %s: send aggr buffer: %d %d\n",
                        __func__,
                                card->mpa_tx.start_port, card->mpa_tx.ports);
+               if (card->supports_sdio_new_mode) {
+                       u32 port_count;
+                       int i;
+
+                       for (i = 0, port_count = 0; i < card->max_ports; i++)
+                               if (card->mpa_tx.ports & BIT(i))
+                                       port_count++;
+
+                       /* Writing data from "start_port + 0" to "start_port +
+                        * port_count -1", so decrease the count by 1
+                        */
+                       port_count--;
+                       mport = (adapter->ioport | SDIO_MPA_ADDR_BASE |
+                                (port_count << 8)) + card->mpa_tx.start_port;
+               } else {
+                       mport = (adapter->ioport | SDIO_MPA_ADDR_BASE |
+                                (card->mpa_tx.ports << 4)) +
+                                card->mpa_tx.start_port;
+               }
+
                ret = mwifiex_write_data_to_card(adapter, card->mpa_tx.buf,
-                                                card->mpa_tx.buf_len,
-                                                (adapter->ioport | 0x1000 |
-                                                (card->mpa_tx.ports << 4)) +
-                                                 card->mpa_tx.start_port);
+                                                card->mpa_tx.buf_len, mport);
 
                MP_TX_AGGR_BUF_RESET(card);
        }
@@ -1434,7 +1618,7 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter,
        int ret;
        u32 buf_block_len;
        u32 blk_size;
-       u8 port = CTRL_PORT;
+       u32 port = CTRL_PORT;
        u8 *payload = (u8 *)skb->data;
        u32 pkt_len = skb->len;
 
@@ -1465,6 +1649,9 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter,
                    pkt_len > MWIFIEX_UPLD_SIZE)
                        dev_err(adapter->dev, "%s: payload=%p, nb=%d\n",
                                __func__, payload, pkt_len);
+
+               if (card->supports_sdio_new_mode)
+                       port = CMD_PORT_SLCT;
        }
 
        /* Transfer data to card */
@@ -1586,18 +1773,7 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
 
        adapter->dev = &func->dev;
 
-       switch (func->device) {
-       case SDIO_DEVICE_ID_MARVELL_8786:
-               strcpy(adapter->fw_name, SD8786_DEFAULT_FW_NAME);
-               break;
-       case SDIO_DEVICE_ID_MARVELL_8797:
-               strcpy(adapter->fw_name, SD8797_DEFAULT_FW_NAME);
-               break;
-       case SDIO_DEVICE_ID_MARVELL_8787:
-       default:
-               strcpy(adapter->fw_name, SD8787_DEFAULT_FW_NAME);
-               break;
-       }
+       strcpy(adapter->fw_name, card->firmware);
 
        return 0;
 
@@ -1626,8 +1802,9 @@ disable_func:
 static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
 {
        struct sdio_mmc_card *card = adapter->card;
+       const struct mwifiex_sdio_card_reg *reg = card->reg;
        int ret;
-       u32 sdio_ireg;
+       u8 sdio_ireg;
 
        /*
         * Read the HOST_INT_STATUS_REG for ACK the first interrupt got
@@ -1645,30 +1822,35 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
        /* Initialize SDIO variables in card */
        card->mp_rd_bitmap = 0;
        card->mp_wr_bitmap = 0;
-       card->curr_rd_port = 1;
-       card->curr_wr_port = 1;
+       card->curr_rd_port = reg->start_rd_port;
+       card->curr_wr_port = reg->start_wr_port;
 
-       card->mp_data_port_mask = DATA_PORT_MASK;
+       card->mp_data_port_mask = reg->data_port_mask;
 
        card->mpa_tx.buf_len = 0;
        card->mpa_tx.pkt_cnt = 0;
        card->mpa_tx.start_port = 0;
 
        card->mpa_tx.enabled = 1;
-       card->mpa_tx.pkt_aggr_limit = SDIO_MP_AGGR_DEF_PKT_LIMIT;
+       card->mpa_tx.pkt_aggr_limit = card->mp_agg_pkt_limit;
 
        card->mpa_rx.buf_len = 0;
        card->mpa_rx.pkt_cnt = 0;
        card->mpa_rx.start_port = 0;
 
        card->mpa_rx.enabled = 1;
-       card->mpa_rx.pkt_aggr_limit = SDIO_MP_AGGR_DEF_PKT_LIMIT;
+       card->mpa_rx.pkt_aggr_limit = card->mp_agg_pkt_limit;
 
        /* Allocate buffers for SDIO MP-A */
-       card->mp_regs = kzalloc(MAX_MP_REGS, GFP_KERNEL);
+       card->mp_regs = kzalloc(reg->max_mp_regs, GFP_KERNEL);
        if (!card->mp_regs)
                return -ENOMEM;
 
+       /* Allocate skb pointer buffers */
+       card->mpa_rx.skb_arr = kzalloc((sizeof(void *)) *
+                                      card->mp_agg_pkt_limit, GFP_KERNEL);
+       card->mpa_rx.len_arr = kzalloc(sizeof(*card->mpa_rx.len_arr) *
+                                      card->mp_agg_pkt_limit, GFP_KERNEL);
        ret = mwifiex_alloc_sdio_mpa_buffers(adapter,
                                             SDIO_MP_TX_AGGR_DEF_BUF_SIZE,
                                             SDIO_MP_RX_AGGR_DEF_BUF_SIZE);
@@ -1705,6 +1887,8 @@ static void mwifiex_cleanup_sdio(struct mwifiex_adapter *adapter)
        struct sdio_mmc_card *card = adapter->card;
 
        kfree(card->mp_regs);
+       kfree(card->mpa_rx.skb_arr);
+       kfree(card->mpa_rx.len_arr);
        kfree(card->mpa_tx.buf);
        kfree(card->mpa_rx.buf);
 }
@@ -1716,16 +1900,20 @@ static void
 mwifiex_update_mp_end_port(struct mwifiex_adapter *adapter, u16 port)
 {
        struct sdio_mmc_card *card = adapter->card;
+       const struct mwifiex_sdio_card_reg *reg = card->reg;
        int i;
 
        card->mp_end_port = port;
 
-       card->mp_data_port_mask = DATA_PORT_MASK;
+       card->mp_data_port_mask = reg->data_port_mask;
 
-       for (i = 1; i <= MAX_PORT - card->mp_end_port; i++)
-               card->mp_data_port_mask &= ~(1 << (MAX_PORT - i));
+       if (reg->start_wr_port) {
+               for (i = 1; i <= card->max_ports - card->mp_end_port; i++)
+                       card->mp_data_port_mask &=
+                                       ~(1 << (card->max_ports - i));
+       }
 
-       card->curr_wr_port = 1;
+       card->curr_wr_port = reg->start_wr_port;
 
        dev_dbg(adapter->dev, "cmd: mp_end_port %d, data port mask 0x%x\n",
                port, card->mp_data_port_mask);
@@ -1831,3 +2019,4 @@ MODULE_LICENSE("GPL v2");
 MODULE_FIRMWARE(SD8786_DEFAULT_FW_NAME);
 MODULE_FIRMWARE(SD8787_DEFAULT_FW_NAME);
 MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(SD8897_DEFAULT_FW_NAME);
index 8cc5468654b401f2b116c8963859f1712b299cb4..6d51dfdd8251a515c3f6a17e94029b5c10f6672a 100644 (file)
 #define SD8786_DEFAULT_FW_NAME "mrvl/sd8786_uapsta.bin"
 #define SD8787_DEFAULT_FW_NAME "mrvl/sd8787_uapsta.bin"
 #define SD8797_DEFAULT_FW_NAME "mrvl/sd8797_uapsta.bin"
+#define SD8897_DEFAULT_FW_NAME "mrvl/sd8897_uapsta.bin"
 
 #define BLOCK_MODE     1
 #define BYTE_MODE      0
 
 #define REG_PORT                       0
-#define RD_BITMAP_L                    0x04
-#define RD_BITMAP_U                    0x05
-#define WR_BITMAP_L                    0x06
-#define WR_BITMAP_U                    0x07
-#define RD_LEN_P0_L                    0x08
-#define RD_LEN_P0_U                    0x09
 
 #define MWIFIEX_SDIO_IO_PORT_MASK              0xfffff
 
 #define MWIFIEX_SDIO_BYTE_MODE_MASK    0x80000000
 
+#define SDIO_MPA_ADDR_BASE             0x1000
 #define CTRL_PORT                      0
 #define CTRL_PORT_MASK                 0x0001
-#define DATA_PORT_MASK                 0xfffe
 
-#define MAX_MP_REGS                    64
-#define MAX_PORT                       16
-
-#define SDIO_MP_AGGR_DEF_PKT_LIMIT     8
+#define CMD_PORT_UPLD_INT_MASK         (0x1U<<6)
+#define CMD_PORT_DNLD_INT_MASK         (0x1U<<7)
+#define HOST_TERM_CMD53                        (0x1U << 2)
+#define REG_PORT                       0
+#define MEM_PORT                       0x10000
+#define CMD_RD_LEN_0                   0xB4
+#define CMD_RD_LEN_1                   0xB5
+#define CARD_CONFIG_2_1_REG             0xCD
+#define CMD53_NEW_MODE                 (0x1U << 0)
+#define CMD_CONFIG_0                   0xB8
+#define CMD_PORT_RD_LEN_EN             (0x1U << 2)
+#define CMD_CONFIG_1                   0xB9
+#define CMD_PORT_AUTO_EN               (0x1U << 0)
+#define CMD_PORT_SLCT                  0x8000
+#define UP_LD_CMD_PORT_HOST_INT_STATUS (0x40U)
+#define DN_LD_CMD_PORT_HOST_INT_STATUS (0x80U)
 
 #define SDIO_MP_TX_AGGR_DEF_BUF_SIZE        (8192)     /* 8K */
 
 
 /* Host Control Registers : Configuration */
 #define CONFIGURATION_REG              0x00
-/* Host Control Registers : Host without Command 53 finish host*/
-#define HOST_TO_CARD_EVENT       (0x1U << 3)
-/* Host Control Registers : Host without Command 53 finish host */
-#define HOST_WO_CMD53_FINISH_HOST      (0x1U << 2)
 /* Host Control Registers : Host power up */
 #define HOST_POWER_UP                  (0x1U << 1)
-/* Host Control Registers : Host power down */
-#define HOST_POWER_DOWN                        (0x1U << 0)
 
 /* Host Control Registers : Host interrupt mask */
 #define HOST_INT_MASK_REG              0x02
@@ -90,8 +91,7 @@
 #define UP_LD_HOST_INT_MASK            (0x1U)
 /* Host Control Registers : Download host interrupt mask */
 #define DN_LD_HOST_INT_MASK            (0x2U)
-/* Enable Host interrupt mask */
-#define HOST_INT_ENABLE        (UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK)
+
 /* Disable Host interrupt mask */
 #define        HOST_INT_DISABLE                0xff
 
 
 /* Host Control Registers : Host interrupt RSR */
 #define HOST_INT_RSR_REG               0x01
-/* Host Control Registers : Upload host interrupt RSR */
-#define UP_LD_HOST_INT_RSR             (0x1U)
-#define SDIO_INT_MASK                  0x3F
 
 /* Host Control Registers : Host interrupt status */
 #define HOST_INT_STATUS_REG            0x28
-/* Host Control Registers : Upload CRC error */
-#define UP_LD_CRC_ERR                  (0x1U << 2)
-/* Host Control Registers : Upload restart */
-#define UP_LD_RESTART                   (0x1U << 1)
-/* Host Control Registers : Download restart */
-#define DN_LD_RESTART                   (0x1U << 0)
-
-/* Card Control Registers : Card status register */
-#define CARD_STATUS_REG                 0x30
+
 /* Card Control Registers : Card I/O ready */
 #define CARD_IO_READY                   (0x1U << 3)
-/* Card Control Registers : CIS card ready */
-#define CIS_CARD_RDY                    (0x1U << 2)
-/* Card Control Registers : Upload card ready */
-#define UP_LD_CARD_RDY                  (0x1U << 1)
 /* Card Control Registers : Download card ready */
 #define DN_LD_CARD_RDY                  (0x1U << 0)
 
-/* Card Control Registers : Host interrupt mask register */
-#define HOST_INTERRUPT_MASK_REG         0x34
-/* Card Control Registers : Host power interrupt mask */
-#define HOST_POWER_INT_MASK             (0x1U << 3)
-/* Card Control Registers : Abort card interrupt mask */
-#define ABORT_CARD_INT_MASK             (0x1U << 2)
-/* Card Control Registers : Upload card interrupt mask */
-#define UP_LD_CARD_INT_MASK             (0x1U << 1)
-/* Card Control Registers : Download card interrupt mask */
-#define DN_LD_CARD_INT_MASK             (0x1U << 0)
-
-/* Card Control Registers : Card interrupt status register */
-#define CARD_INTERRUPT_STATUS_REG       0x38
-/* Card Control Registers : Power up interrupt */
-#define POWER_UP_INT                    (0x1U << 4)
-/* Card Control Registers : Power down interrupt */
-#define POWER_DOWN_INT                  (0x1U << 3)
-
-/* Card Control Registers : Card interrupt RSR register */
-#define CARD_INTERRUPT_RSR_REG          0x3c
-/* Card Control Registers : Power up RSR */
-#define POWER_UP_RSR                    (0x1U << 4)
-/* Card Control Registers : Power down RSR */
-#define POWER_DOWN_RSR                  (0x1U << 3)
-
-/* Card Control Registers : Miscellaneous Configuration Register */
-#define CARD_MISC_CFG_REG               0x6C
-
-/* Host F1 read base 0 */
-#define HOST_F1_RD_BASE_0              0x0040
-/* Host F1 read base 1 */
-#define HOST_F1_RD_BASE_1              0x0041
-/* Host F1 card ready */
-#define HOST_F1_CARD_RDY               0x0020
-
-/* Firmware status 0 register */
-#define CARD_FW_STATUS0_REG            0x60
-/* Firmware status 1 register */
-#define CARD_FW_STATUS1_REG            0x61
-/* Rx length register */
-#define CARD_RX_LEN_REG                        0x62
-/* Rx unit register */
-#define CARD_RX_UNIT_REG               0x63
-
 /* Max retry number of CMD53 write */
 #define MAX_WRITE_IOMEM_RETRY          2
 
        if (a->mpa_tx.start_port <= port)                               \
                a->mpa_tx.ports |= (1<<(a->mpa_tx.pkt_cnt));            \
        else                                                            \
-               a->mpa_tx.ports |= (1<<(a->mpa_tx.pkt_cnt+1+(MAX_PORT - \
+               a->mpa_tx.ports |= (1<<(a->mpa_tx.pkt_cnt+1+            \
+                                               (a->max_ports - \
                                                a->mp_end_port)));      \
        a->mpa_tx.pkt_cnt++;                                            \
 } while (0)
 #define MP_TX_AGGR_PKT_LIMIT_REACHED(a)                                        \
                        (a->mpa_tx.pkt_cnt == a->mpa_tx.pkt_aggr_limit)
 
-/* SDIO Tx aggregation port limit ? */
-#define MP_TX_AGGR_PORT_LIMIT_REACHED(a) ((a->curr_wr_port <           \
-                       a->mpa_tx.start_port) && (((MAX_PORT -          \
-                       a->mpa_tx.start_port) + a->curr_wr_port) >=     \
-                               SDIO_MP_AGGR_DEF_PKT_LIMIT))
-
 /* Reset SDIO Tx aggregation buffer parameters */
 #define MP_TX_AGGR_BUF_RESET(a) do {                                   \
        a->mpa_tx.pkt_cnt = 0;                                          \
 #define MP_RX_AGGR_PKT_LIMIT_REACHED(a)                                        \
                        (a->mpa_rx.pkt_cnt == a->mpa_rx.pkt_aggr_limit)
 
-/* SDIO Tx aggregation port limit ? */
-#define MP_RX_AGGR_PORT_LIMIT_REACHED(a) ((a->curr_rd_port <           \
-                       a->mpa_rx.start_port) && (((MAX_PORT -          \
-                       a->mpa_rx.start_port) + a->curr_rd_port) >=     \
-                       SDIO_MP_AGGR_DEF_PKT_LIMIT))
-
 /* SDIO Rx aggregation in progress ? */
 #define MP_RX_AGGR_IN_PROGRESS(a) (a->mpa_rx.pkt_cnt > 0)
 
 #define MP_RX_AGGR_BUF_HAS_ROOM(a, rx_len)                             \
                        ((a->mpa_rx.buf_len+rx_len) <= a->mpa_rx.buf_size)
 
-/* Prepare to copy current packet from card to SDIO Rx aggregation buffer */
-#define MP_RX_AGGR_SETUP(a, skb, port) do {                            \
-       a->mpa_rx.buf_len += skb->len;                                  \
-       if (!a->mpa_rx.pkt_cnt)                                         \
-               a->mpa_rx.start_port = port;                            \
-       if (a->mpa_rx.start_port <= port)                               \
-               a->mpa_rx.ports |= (1<<(a->mpa_rx.pkt_cnt));            \
-       else                                                            \
-               a->mpa_rx.ports |= (1<<(a->mpa_rx.pkt_cnt+1));          \
-       a->mpa_rx.skb_arr[a->mpa_rx.pkt_cnt] = skb;                     \
-       a->mpa_rx.len_arr[a->mpa_rx.pkt_cnt] = skb->len;                \
-       a->mpa_rx.pkt_cnt++;                                            \
-} while (0)
-
 /* Reset SDIO Rx aggregation buffer parameters */
 #define MP_RX_AGGR_BUF_RESET(a) do {                                   \
        a->mpa_rx.pkt_cnt = 0;                                          \
        a->mpa_rx.start_port = 0;                                       \
 } while (0)
 
-
 /* data structure for SDIO MPA TX */
 struct mwifiex_sdio_mpa_tx {
        /* multiport tx aggregation buffer pointer */
        u8 *buf;
        u32 buf_len;
        u32 pkt_cnt;
-       u16 ports;
+       u32 ports;
        u16 start_port;
        u8 enabled;
        u32 buf_size;
@@ -272,11 +187,11 @@ struct mwifiex_sdio_mpa_rx {
        u8 *buf;
        u32 buf_len;
        u32 pkt_cnt;
-       u16 ports;
+       u32 ports;
        u16 start_port;
 
-       struct sk_buff *skb_arr[SDIO_MP_AGGR_DEF_PKT_LIMIT];
-       u32 len_arr[SDIO_MP_AGGR_DEF_PKT_LIMIT];
+       struct sk_buff **skb_arr;
+       u32 *len_arr;
 
        u8 enabled;
        u32 buf_size;
@@ -286,15 +201,47 @@ struct mwifiex_sdio_mpa_rx {
 int mwifiex_bus_register(void);
 void mwifiex_bus_unregister(void);
 
+struct mwifiex_sdio_card_reg {
+       u8 start_rd_port;
+       u8 start_wr_port;
+       u8 base_0_reg;
+       u8 base_1_reg;
+       u8 poll_reg;
+       u8 host_int_enable;
+       u8 status_reg_0;
+       u8 status_reg_1;
+       u8 sdio_int_mask;
+       u32 data_port_mask;
+       u8 max_mp_regs;
+       u8 rd_bitmap_l;
+       u8 rd_bitmap_u;
+       u8 rd_bitmap_1l;
+       u8 rd_bitmap_1u;
+       u8 wr_bitmap_l;
+       u8 wr_bitmap_u;
+       u8 wr_bitmap_1l;
+       u8 wr_bitmap_1u;
+       u8 rd_len_p0_l;
+       u8 rd_len_p0_u;
+       u8 card_misc_cfg_reg;
+};
+
 struct sdio_mmc_card {
        struct sdio_func *func;
        struct mwifiex_adapter *adapter;
 
-       u16 mp_rd_bitmap;
-       u16 mp_wr_bitmap;
+       const char *firmware;
+       const struct mwifiex_sdio_card_reg *reg;
+       u8 max_ports;
+       u8 mp_agg_pkt_limit;
+       bool supports_sdio_new_mode;
+       bool has_control_mask;
+
+       u32 mp_rd_bitmap;
+       u32 mp_wr_bitmap;
 
        u16 mp_end_port;
-       u16 mp_data_port_mask;
+       u32 mp_data_port_mask;
 
        u8 curr_rd_port;
        u8 curr_wr_port;
@@ -305,6 +252,98 @@ struct sdio_mmc_card {
        struct mwifiex_sdio_mpa_rx mpa_rx;
 };
 
+struct mwifiex_sdio_device {
+       const char *firmware;
+       const struct mwifiex_sdio_card_reg *reg;
+       u8 max_ports;
+       u8 mp_agg_pkt_limit;
+       bool supports_sdio_new_mode;
+       bool has_control_mask;
+};
+
+static const struct mwifiex_sdio_card_reg mwifiex_reg_sd87xx = {
+       .start_rd_port = 1,
+       .start_wr_port = 1,
+       .base_0_reg = 0x0040,
+       .base_1_reg = 0x0041,
+       .poll_reg = 0x30,
+       .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK,
+       .status_reg_0 = 0x60,
+       .status_reg_1 = 0x61,
+       .sdio_int_mask = 0x3f,
+       .data_port_mask = 0x0000fffe,
+       .max_mp_regs = 64,
+       .rd_bitmap_l = 0x04,
+       .rd_bitmap_u = 0x05,
+       .wr_bitmap_l = 0x06,
+       .wr_bitmap_u = 0x07,
+       .rd_len_p0_l = 0x08,
+       .rd_len_p0_u = 0x09,
+       .card_misc_cfg_reg = 0x6c,
+};
+
+static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8897 = {
+       .start_rd_port = 0,
+       .start_wr_port = 0,
+       .base_0_reg = 0x60,
+       .base_1_reg = 0x61,
+       .poll_reg = 0x50,
+       .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK |
+                       CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK,
+       .status_reg_0 = 0xc0,
+       .status_reg_1 = 0xc1,
+       .sdio_int_mask = 0xff,
+       .data_port_mask = 0xffffffff,
+       .max_mp_regs = 184,
+       .rd_bitmap_l = 0x04,
+       .rd_bitmap_u = 0x05,
+       .rd_bitmap_1l = 0x06,
+       .rd_bitmap_1u = 0x07,
+       .wr_bitmap_l = 0x08,
+       .wr_bitmap_u = 0x09,
+       .wr_bitmap_1l = 0x0a,
+       .wr_bitmap_1u = 0x0b,
+       .rd_len_p0_l = 0x0c,
+       .rd_len_p0_u = 0x0d,
+       .card_misc_cfg_reg = 0xcc,
+};
+
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = {
+       .firmware = SD8786_DEFAULT_FW_NAME,
+       .reg = &mwifiex_reg_sd87xx,
+       .max_ports = 16,
+       .mp_agg_pkt_limit = 8,
+       .supports_sdio_new_mode = false,
+       .has_control_mask = true,
+};
+
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
+       .firmware = SD8787_DEFAULT_FW_NAME,
+       .reg = &mwifiex_reg_sd87xx,
+       .max_ports = 16,
+       .mp_agg_pkt_limit = 8,
+       .supports_sdio_new_mode = false,
+       .has_control_mask = true,
+};
+
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
+       .firmware = SD8797_DEFAULT_FW_NAME,
+       .reg = &mwifiex_reg_sd87xx,
+       .max_ports = 16,
+       .mp_agg_pkt_limit = 8,
+       .supports_sdio_new_mode = false,
+       .has_control_mask = true,
+};
+
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
+       .firmware = SD8897_DEFAULT_FW_NAME,
+       .reg = &mwifiex_reg_sd8897,
+       .max_ports = 32,
+       .mp_agg_pkt_limit = 16,
+       .supports_sdio_new_mode = true,
+       .has_control_mask = false,
+};
+
 /*
  * .cmdrsp_complete handler
  */
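
The per-chipset tables above presumably reach the driver through the SDIO id
table's driver_data at probe time; a minimal sketch of that hookup follows
(the probe-side changes are not part of this hunk, so the function below is an
assumption for illustration, not the patch's code):

	static int mwifiex_sdio_probe_sketch(struct sdio_func *func,
					     const struct sdio_device_id *id)
	{
		const struct mwifiex_sdio_device *data =
			(const struct mwifiex_sdio_device *)id->driver_data;
		struct sdio_mmc_card *card;

		card = kzalloc(sizeof(*card), GFP_KERNEL);
		if (!card)
			return -ENOMEM;

		/* Copy the per-chipset constants into the live card state. */
		card->func = func;
		card->firmware = data->firmware;
		card->reg = data->reg;
		card->max_ports = data->max_ports;
		card->mp_agg_pkt_limit = data->mp_agg_pkt_limit;
		card->supports_sdio_new_mode = data->supports_sdio_new_mode;
		card->has_control_mask = data->has_control_mask;
		return 0;
	}
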
@@ -325,4 +364,77 @@ static inline int mwifiex_sdio_event_complete(struct mwifiex_adapter *adapter,
        return 0;
 }
 
+static inline bool
+mp_rx_aggr_port_limit_reached(struct sdio_mmc_card *card)
+{
+       u8 tmp;
+
+       if (card->curr_rd_port < card->mpa_rx.start_port) {
+               if (card->supports_sdio_new_mode)
+                       tmp = card->mp_end_port >> 1;
+               else
+                       tmp = card->mp_agg_pkt_limit;
+
+               if (((card->max_ports - card->mpa_rx.start_port) +
+                   card->curr_rd_port) >= tmp)
+                       return true;
+       }
+
+       if (!card->supports_sdio_new_mode)
+               return false;
+
+       if ((card->curr_rd_port - card->mpa_rx.start_port) >=
+           (card->mp_end_port >> 1))
+               return true;
+
+       return false;
+}
+
+static inline bool
+mp_tx_aggr_port_limit_reached(struct sdio_mmc_card *card)
+{
+       u16 tmp;
+
+       if (card->curr_wr_port < card->mpa_tx.start_port) {
+               if (card->supports_sdio_new_mode)
+                       tmp = card->mp_end_port >> 1;
+               else
+                       tmp = card->mp_agg_pkt_limit;
+
+               if (((card->max_ports - card->mpa_tx.start_port) +
+                   card->curr_wr_port) >= tmp)
+                       return true;
+       }
+
+       if (!card->supports_sdio_new_mode)
+               return false;
+
+       if ((card->curr_wr_port - card->mpa_tx.start_port) >=
+           (card->mp_end_port >> 1))
+               return true;
+
+       return false;
+}
+
+/* Prepare to copy current packet from card to SDIO Rx aggregation buffer */
+static inline void mp_rx_aggr_setup(struct sdio_mmc_card *card,
+                                   struct sk_buff *skb, u8 port)
+{
+       card->mpa_rx.buf_len += skb->len;
+
+       if (!card->mpa_rx.pkt_cnt)
+               card->mpa_rx.start_port = port;
+
+       if (card->supports_sdio_new_mode) {
+               card->mpa_rx.ports |= (1 << port);
+       } else {
+               if (card->mpa_rx.start_port <= port)
+                       card->mpa_rx.ports |= 1 << (card->mpa_rx.pkt_cnt);
+               else
+                       card->mpa_rx.ports |= 1 << (card->mpa_rx.pkt_cnt + 1);
+       }
+       card->mpa_rx.skb_arr[card->mpa_rx.pkt_cnt] = skb;
+       card->mpa_rx.len_arr[card->mpa_rx.pkt_cnt] = skb->len;
+       card->mpa_rx.pkt_cnt++;
+}
 #endif /* _MWIFIEX_SDIO_H */
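
A small worked example of the wrap-around branch in the port-limit helpers
above, with values chosen purely for illustration:

	/* Old (non new-mode) chip: max_ports = 16, mp_agg_pkt_limit = 8.
	 * Suppose mpa_tx.start_port = 14 and curr_wr_port has wrapped to 6:
	 *
	 *     (max_ports - start_port) + curr_wr_port = (16 - 14) + 6 = 8 >= 8
	 *
	 * so mp_tx_aggr_port_limit_reached() returns true and the aggregate
	 * is flushed before the write pointer laps the start port.  New-mode
	 * chips use mp_end_port / 2 as the limit instead.
	 */
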
index b193e25977d2a321d6a1df1923729b7039cf8825..8ece48580642b28e92ecf5d28629d8c2585dec05 100644 (file)
@@ -1134,6 +1134,55 @@ mwifiex_cmd_mef_cfg(struct mwifiex_private *priv,
        return 0;
 }
 
+/* This function parses cal data from ASCII to hex */
+static u32 mwifiex_parse_cal_cfg(u8 *src, size_t len, u8 *dst)
+{
+       u8 *s = src, *d = dst;
+
+       while (s - src < len) {
+               if (*s && (isspace(*s) || *s == '\t')) {
+                       s++;
+                       continue;
+               }
+               if (isxdigit(*s)) {
+                       *d++ = simple_strtol(s, NULL, 16);
+                       s += 2;
+               } else {
+                       s++;
+               }
+       }
+
+       return d - dst;
+}
+
+/* This function prepares command of set_cfg_data. */
+static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv,
+                               struct host_cmd_ds_command *cmd,
+                               u16 cmd_action)
+{
+       struct host_cmd_ds_802_11_cfg_data *cfg_data = &cmd->params.cfg_data;
+       struct mwifiex_adapter *adapter = priv->adapter;
+       u32 len, cal_data_offset;
+       u8 *tmp_cmd = (u8 *)cmd;
+
+       cal_data_offset = S_DS_GEN + sizeof(*cfg_data);
+       if ((adapter->cal_data->data) && (adapter->cal_data->size > 0))
+               len = mwifiex_parse_cal_cfg((u8 *)adapter->cal_data->data,
+                                           adapter->cal_data->size,
+                                           (u8 *)(tmp_cmd + cal_data_offset));
+       else
+               return -1;
+
+       cfg_data->action = cpu_to_le16(cmd_action);
+       cfg_data->type = cpu_to_le16(CFG_DATA_TYPE_CAL);
+       cfg_data->data_len = cpu_to_le16(len);
+
+       cmd->command = cpu_to_le16(HostCmd_CMD_CFG_DATA);
+       cmd->size = cpu_to_le16(S_DS_GEN + sizeof(*cfg_data) + len);
+
+       return 0;
+}
+
 /*
  * This function prepares the commands before sending them to the firmware.
  *
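
For reference, a short example of what mwifiex_parse_cal_cfg() above produces;
the input text here is invented for illustration:

	/* Whitespace is skipped; each hex byte is converted with
	 * simple_strtol(s, NULL, 16) and the cursor then advances two chars.
	 *
	 *     src = "1a 2b\n3c"   ->   dst = { 0x1a, 0x2b, 0x3c },  returns 3
	 */
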
@@ -1152,6 +1201,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
        case HostCmd_CMD_GET_HW_SPEC:
                ret = mwifiex_cmd_get_hw_spec(priv, cmd_ptr);
                break;
+       case HostCmd_CMD_CFG_DATA:
+               ret = mwifiex_cmd_cfg_data(priv, cmd_ptr, cmd_action);
+               break;
        case HostCmd_CMD_MAC_CONTROL:
                ret = mwifiex_cmd_mac_control(priv, cmd_ptr, cmd_action,
                                              data_buf);
@@ -1384,6 +1436,7 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
  */
 int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
 {
+       struct mwifiex_adapter *adapter = priv->adapter;
        int ret;
        u16 enable = true;
        struct mwifiex_ds_11n_amsdu_aggr_ctrl amsdu_aggr_ctrl;
@@ -1404,6 +1457,15 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
                                            HostCmd_ACT_GEN_SET, 0, NULL);
                if (ret)
                        return -1;
+
+               /* Download calibration data to firmware */
+               if (adapter->cal_data) {
+                       ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_CFG_DATA,
+                                               HostCmd_ACT_GEN_SET, 0, NULL);
+                       if (ret)
+                               return -1;
+               }
+
                /* Read MAC address from HW */
                ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_GET_HW_SPEC,
                                            HostCmd_ACT_GEN_GET, 0, NULL);
index 9f990e14966e8354a91b6e90bd69c413c3f562e7..d85df158cc6c03cb9002ee300d9c2f774fcdd4fa 100644 (file)
@@ -818,6 +818,18 @@ static int mwifiex_ret_subsc_evt(struct mwifiex_private *priv,
        return 0;
 }
 
+/* This function handles the command response of set_cfg_data */
+static int mwifiex_ret_cfg_data(struct mwifiex_private *priv,
+                               struct host_cmd_ds_command *resp)
+{
+       if (resp->result != HostCmd_RESULT_OK) {
+               dev_err(priv->adapter->dev, "Cal data cmd resp failed\n");
+               return -1;
+       }
+
+       return 0;
+}
+
 /*
  * This function handles the command responses.
  *
@@ -841,6 +853,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
        case HostCmd_CMD_GET_HW_SPEC:
                ret = mwifiex_ret_get_hw_spec(priv, resp);
                break;
+       case HostCmd_CMD_CFG_DATA:
+               ret = mwifiex_ret_cfg_data(priv, resp);
+               break;
        case HostCmd_CMD_MAC_CONTROL:
                break;
        case HostCmd_CMD_802_11_MAC_ADDRESS:
@@ -978,6 +993,8 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
        case HostCmd_CMD_UAP_BSS_STOP:
                priv->bss_started = 0;
                break;
+       case HostCmd_CMD_UAP_STA_DEAUTH:
+               break;
        case HostCmd_CMD_MEF_CFG:
                break;
        default:
index 41aafc7454ed2b626bded2d96d9f8f9ac143a992..ea265ec0e522333837a13ab43a16d74ccaae7550 100644 (file)
@@ -427,6 +427,17 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
 
                break;
 
+       case EVENT_CHANNEL_SWITCH_ANN:
+               dev_dbg(adapter->dev, "event: Channel Switch Announcement\n");
+               priv->csa_expire_time =
+                               jiffies + msecs_to_jiffies(DFS_CHAN_MOVE_TIME);
+               priv->csa_chan = priv->curr_bss_params.bss_descriptor.channel;
+               ret = mwifiex_send_cmd_async(priv,
+                       HostCmd_CMD_802_11_DEAUTHENTICATE,
+                       HostCmd_ACT_GEN_SET, 0,
+                       priv->curr_bss_params.bss_descriptor.mac_address);
+               break;
+
        default:
                dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
                        eventcause);
index 1a8a19dbd635df7b6b012533605cdde724c0d000..206c3e03807235425ee3c0ad12676cc7c187cad7 100644 (file)
@@ -104,16 +104,14 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
                } else {
                        priv->curr_pkt_filter &=
                                ~HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE;
-                       if (mcast_list->num_multicast_addr) {
-                               dev_dbg(priv->adapter->dev,
-                                       "info: Set multicast list=%d\n",
-                                      mcast_list->num_multicast_addr);
-                               /* Send multicast addresses to firmware */
-                               ret = mwifiex_send_cmd_async(priv,
-                                       HostCmd_CMD_MAC_MULTICAST_ADR,
-                                       HostCmd_ACT_GEN_SET, 0,
-                                       mcast_list);
-                       }
+                       dev_dbg(priv->adapter->dev,
+                               "info: Set multicast list=%d\n",
+                               mcast_list->num_multicast_addr);
+                       /* Send multicast addresses to firmware */
+                       ret = mwifiex_send_cmd_async(priv,
+                               HostCmd_CMD_MAC_MULTICAST_ADR,
+                               HostCmd_ACT_GEN_SET, 0,
+                               mcast_list);
                }
        }
        dev_dbg(priv->adapter->dev,
@@ -180,6 +178,9 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
         */
        bss_desc->disable_11ac = true;
 
+       if (bss_desc->cap_info_bitmap & WLAN_CAPABILITY_SPECTRUM_MGMT)
+               bss_desc->sensed_11h = true;
+
        return mwifiex_update_bss_desc_with_ie(priv->adapter, bss_desc);
 }
 
@@ -257,30 +258,37 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
        }
 
        if (priv->bss_mode == NL80211_IFTYPE_STATION) {
+               u8 config_bands;
+
                /* Infra mode */
                ret = mwifiex_deauthenticate(priv, NULL);
                if (ret)
                        goto done;
 
-               if (bss_desc) {
-                       u8 config_bands = 0;
+               if (!bss_desc)
+                       return -1;
 
-                       if (mwifiex_band_to_radio_type((u8) bss_desc->bss_band)
-                           == HostCmd_SCAN_RADIO_TYPE_BG)
-                               config_bands = BAND_B | BAND_G | BAND_GN |
-                                              BAND_GAC;
-                       else
-                               config_bands = BAND_A | BAND_AN | BAND_AAC;
+               if (mwifiex_band_to_radio_type(bss_desc->bss_band) ==
+                                               HostCmd_SCAN_RADIO_TYPE_BG)
+                       config_bands = BAND_B | BAND_G | BAND_GN | BAND_GAC;
+               else
+                       config_bands = BAND_A | BAND_AN | BAND_AAC;
 
-                       if (!((config_bands | adapter->fw_bands) &
-                             ~adapter->fw_bands))
-                               adapter->config_bands = config_bands;
-               }
+               if (!((config_bands | adapter->fw_bands) & ~adapter->fw_bands))
+                       adapter->config_bands = config_bands;
 
                ret = mwifiex_check_network_compatibility(priv, bss_desc);
                if (ret)
                        goto done;
 
+               if (mwifiex_11h_get_csa_closed_channel(priv) ==
+                                                       (u8)bss_desc->channel) {
+                       dev_err(adapter->dev,
+                               "Attempt to reconnect on csa closed chan(%d)\n",
+                               bss_desc->channel);
+                       goto done;
+               }
+
                dev_dbg(adapter->dev, "info: SSID found in scan list ... "
                                      "associating...\n");
 
index b04b1db291002b8174cb0cf013a69c97e0bf4e40..2de882dead0f879dee34831e0c61adc470d9dd70 100644 (file)
@@ -689,6 +689,23 @@ mwifiex_cmd_uap_sys_config(struct host_cmd_ds_command *cmd, u16 cmd_action,
        return 0;
 }
 
+/* This function prepares the AP-specific deauth command with the MAC address
+ * supplied as a function parameter.
+ */
+static int mwifiex_cmd_uap_sta_deauth(struct mwifiex_private *priv,
+                                     struct host_cmd_ds_command *cmd, u8 *mac)
+{
+       struct host_cmd_ds_sta_deauth *sta_deauth = &cmd->params.sta_deauth;
+
+       cmd->command = cpu_to_le16(HostCmd_CMD_UAP_STA_DEAUTH);
+       memcpy(sta_deauth->mac, mac, ETH_ALEN);
+       sta_deauth->reason = cpu_to_le16(WLAN_REASON_DEAUTH_LEAVING);
+
+       cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_sta_deauth) +
+                               S_DS_GEN);
+       return 0;
+}
+
 /* This function prepares the AP specific commands before sending them
  * to the firmware.
  * This is a generic function which calls specific command preparation
@@ -710,6 +727,10 @@ int mwifiex_uap_prepare_cmd(struct mwifiex_private *priv, u16 cmd_no,
                cmd->command = cpu_to_le16(cmd_no);
                cmd->size = cpu_to_le16(S_DS_GEN);
                break;
+       case HostCmd_CMD_UAP_STA_DEAUTH:
+               if (mwifiex_cmd_uap_sta_deauth(priv, cmd, data_buf))
+                       return -1;
+               break;
        default:
                dev_err(priv->adapter->dev,
                        "PREP_CMD: unknown cmd %#x\n", cmd_no);
index 21c640d3b57919792e53406ed94ae7cca96a90e7..718066577c6c531f22c9f4387639237875f04862 100644 (file)
@@ -107,18 +107,15 @@ mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
  */
 static void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac)
 {
-       struct mwifiex_sta_node *node, *tmp;
+       struct mwifiex_sta_node *node;
        unsigned long flags;
 
        spin_lock_irqsave(&priv->sta_list_spinlock, flags);
 
        node = mwifiex_get_sta_entry(priv, mac);
        if (node) {
-               list_for_each_entry_safe(node, tmp, &priv->sta_list,
-                                        list) {
-                       list_del(&node->list);
-                       kfree(node);
-               }
+               list_del(&node->list);
+               kfree(node);
        }
 
        spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
@@ -295,3 +292,19 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
 
        return 0;
 }
+
+/* This function deletes the station entry from the associated station list.
+ * If both AP and STA are 11n enabled, the RxReorder tables and TxBA stream
+ * tables created for this station are also deleted.
+ */
+void mwifiex_uap_del_sta_data(struct mwifiex_private *priv,
+                             struct mwifiex_sta_node *node)
+{
+       if (priv->ap_11n_enabled && node->is_11n_enabled) {
+               mwifiex_11n_del_rx_reorder_tbl_by_ta(priv, node->mac_addr);
+               mwifiex_del_tx_ba_stream_tbl_by_ra(priv, node->mac_addr);
+       }
+       mwifiex_del_sta_entry(priv, node->mac_addr);
+
+       return;
+}
index 4be3d33ceae81b25bddbab91ea01482282571f2b..944e8846f6fc757e8f937f0ece42e243279e9a29 100644 (file)
@@ -37,6 +37,9 @@
 /* Offset for TOS field in the IP header */
 #define IPTOS_OFFSET 5
 
+static bool enable_tx_amsdu;
+module_param(enable_tx_amsdu, bool, 0644);
+
 /* WMM information IE */
 static const u8 wmm_info_ie[] = { WLAN_EID_VENDOR_SPECIFIC, 0x07,
        0x00, 0x50, 0xf2, 0x02,
@@ -1233,7 +1236,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
                                mwifiex_send_delba(priv, tid_del, ra, 1);
                        }
                }
-               if (mwifiex_is_amsdu_allowed(priv, tid) &&
+               if (enable_tx_amsdu && mwifiex_is_amsdu_allowed(priv, tid) &&
                    mwifiex_is_11n_aggragation_possible(priv, ptr,
                                                        adapter->tx_buf_size))
                        mwifiex_11n_aggregate_pkt(priv, ptr, INTF_HEADER_LEN,
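
The new knob is a plain writable module parameter; a brief usage note follows
(standard module_param behaviour, with the module name assumed to be mwifiex):

	/* Defaults to off, so TX AMSDU aggregation stays disabled unless the
	 * parameter is set at load time, e.g.
	 *
	 *     modprobe mwifiex enable_tx_amsdu=1
	 *
	 * or toggled later via /sys/module/mwifiex/parameters/enable_tx_amsdu
	 * (the 0644 mode makes it writable at runtime).
	 */
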
index 6820fce4016b48f7271c99119c6825b538e2f545..a3707fd4ef623e5968afc85e5d265cb49024d764 100644 (file)
@@ -1548,7 +1548,7 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
        if (!priv->pending_tx_pkts)
                return 0;
 
-       retry = 0;
+       retry = 1;
        rc = 0;
 
        spin_lock_bh(&priv->tx_lock);
@@ -1572,13 +1572,19 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
 
                spin_lock_bh(&priv->tx_lock);
 
-               if (timeout) {
+               if (timeout || !priv->pending_tx_pkts) {
                        WARN_ON(priv->pending_tx_pkts);
                        if (retry)
                                wiphy_notice(hw->wiphy, "tx rings drained\n");
                        break;
                }
 
+               if (retry) {
+                       mwl8k_tx_start(priv);
+                       retry = 0;
+                       continue;
+               }
+
                if (priv->pending_tx_pkts < oldcount) {
                        wiphy_notice(hw->wiphy,
                                     "waiting for tx rings to drain (%d -> %d pkts)\n",
@@ -2055,6 +2061,7 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw,
                                mwl8k_remove_stream(hw, stream);
                                spin_unlock(&priv->stream_lock);
                        }
+                       mwl8k_tx_start(priv);
                        spin_unlock_bh(&priv->tx_lock);
                        pci_unmap_single(priv->pdev, dma, skb->len,
                                         PCI_DMA_TODEVICE);
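
A short recap of the reworked drain logic above (no new code, just the flow):

	/* mwl8k_tx_wait_empty() now starts with retry = 1.  If the first wait
	 * ends with packets still pending, it kicks the DMA once via
	 * mwl8k_tx_start() and waits again; only after that single retry does
	 * it fall through to the "waiting for tx rings to drain" reporting.
	 */
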
index 1f9cb55c33608c1a5daeab253ebd9372da8827e7..bdfe637953f4d3a41270abafa9e174d04901879c 100644 (file)
@@ -881,7 +881,8 @@ static int ezusb_access_ltv(struct ezusb_priv *upriv,
 
        if (!upriv->udev) {
                dbg("Device disconnected");
-               return -ENODEV;
+               retval = -ENODEV;
+               goto exit;
        }
 
        if (upriv->read_urb->status != -EINPROGRESS)
index 978e7eb26567fc14cfe2a86faa294670f90e13ad..7fc46f26cf2be3085f08c56673931a44d538d098 100644 (file)
@@ -42,8 +42,7 @@
 
 MODULE_FIRMWARE("3826.arm");
 
-/*
- * gpios should be handled in board files and provided via platform data,
+/* gpios should be handled in board files and provided via platform data,
  * but because it's currently impossible for p54spi to have a header file
  * in include/linux, let's use module parameters for now
  */
@@ -191,8 +190,7 @@ static int p54spi_request_eeprom(struct ieee80211_hw *dev)
        const struct firmware *eeprom;
        int ret;
 
-       /*
-        * allow users to customize their eeprom.
+       /* allow users to customize their eeprom.
         */
 
        ret = request_firmware(&eeprom, "3826.eeprom", &priv->spi->dev);
@@ -285,8 +283,7 @@ static void p54spi_power_on(struct p54s_priv *priv)
        gpio_set_value(p54spi_gpio_power, 1);
        enable_irq(gpio_to_irq(p54spi_gpio_irq));
 
-       /*
-        * need to wait a while before device can be accessed, the length
+       /* need to wait a while before device can be accessed, the length
         * is just a guess
         */
        msleep(10);
@@ -365,7 +362,8 @@ static int p54spi_rx(struct p54s_priv *priv)
        /* Firmware may insert up to 4 padding bytes after the lmac header,
         * but it does not amend the size of SPI data transfer.
         * Such packets have the correct data size in the header, thus referencing
-        * past the end of allocated skb. Reserve extra 4 bytes for this case */
+        * past the end of allocated skb. Reserve extra 4 bytes for this case
+        */
        skb = dev_alloc_skb(len + 4);
        if (!skb) {
                p54spi_sleep(priv);
@@ -383,7 +381,8 @@ static int p54spi_rx(struct p54s_priv *priv)
        }
        p54spi_sleep(priv);
        /* Put additional bytes to compensate for the possible
-        * alignment-caused truncation */
+        * alignment-caused truncation
+        */
        skb_put(skb, 4);
 
        if (p54_rx(priv->hw, skb) == 0)
@@ -713,27 +712,7 @@ static struct spi_driver p54spi_driver = {
        .remove         = p54spi_remove,
 };
 
-static int __init p54spi_init(void)
-{
-       int ret;
-
-       ret = spi_register_driver(&p54spi_driver);
-       if (ret < 0) {
-               printk(KERN_ERR "failed to register SPI driver: %d", ret);
-               goto out;
-       }
-
-out:
-       return ret;
-}
-
-static void __exit p54spi_exit(void)
-{
-       spi_unregister_driver(&p54spi_driver);
-}
-
-module_init(p54spi_init);
-module_exit(p54spi_exit);
+module_spi_driver(p54spi_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Christian Lamparter <chunkeey@web.de>");
index f7143733d7e95eaeba3a5f8ea8c2aff80c350334..3d53a09da5a12da22f2697c9a22b7dfb49daaff0 100644 (file)
@@ -1767,33 +1767,45 @@ static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = {
        .config                 = rt2400pci_config,
 };
 
-static const struct data_queue_desc rt2400pci_queue_rx = {
-       .entry_num              = 24,
-       .data_size              = DATA_FRAME_SIZE,
-       .desc_size              = RXD_DESC_SIZE,
-       .priv_size              = sizeof(struct queue_entry_priv_mmio),
-};
+static void rt2400pci_queue_init(struct data_queue *queue)
+{
+       switch (queue->qid) {
+       case QID_RX:
+               queue->limit = 24;
+               queue->data_size = DATA_FRAME_SIZE;
+               queue->desc_size = RXD_DESC_SIZE;
+               queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+               break;
 
-static const struct data_queue_desc rt2400pci_queue_tx = {
-       .entry_num              = 24,
-       .data_size              = DATA_FRAME_SIZE,
-       .desc_size              = TXD_DESC_SIZE,
-       .priv_size              = sizeof(struct queue_entry_priv_mmio),
-};
+       case QID_AC_VO:
+       case QID_AC_VI:
+       case QID_AC_BE:
+       case QID_AC_BK:
+               queue->limit = 24;
+               queue->data_size = DATA_FRAME_SIZE;
+               queue->desc_size = TXD_DESC_SIZE;
+               queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+               break;
 
-static const struct data_queue_desc rt2400pci_queue_bcn = {
-       .entry_num              = 1,
-       .data_size              = MGMT_FRAME_SIZE,
-       .desc_size              = TXD_DESC_SIZE,
-       .priv_size              = sizeof(struct queue_entry_priv_mmio),
-};
+       case QID_BEACON:
+               queue->limit = 1;
+               queue->data_size = MGMT_FRAME_SIZE;
+               queue->desc_size = TXD_DESC_SIZE;
+               queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+               break;
 
-static const struct data_queue_desc rt2400pci_queue_atim = {
-       .entry_num              = 8,
-       .data_size              = DATA_FRAME_SIZE,
-       .desc_size              = TXD_DESC_SIZE,
-       .priv_size              = sizeof(struct queue_entry_priv_mmio),
-};
+       case QID_ATIM:
+               queue->limit = 8;
+               queue->data_size = DATA_FRAME_SIZE;
+               queue->desc_size = TXD_DESC_SIZE;
+               queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+               break;
+
+       default:
+               BUG();
+               break;
+       }
+}
 
 static const struct rt2x00_ops rt2400pci_ops = {
        .name                   = KBUILD_MODNAME,
@@ -1801,11 +1813,7 @@ static const struct rt2x00_ops rt2400pci_ops = {
        .eeprom_size            = EEPROM_SIZE,
        .rf_size                = RF_SIZE,
        .tx_queues              = NUM_TX_QUEUES,
-       .extra_tx_headroom      = 0,
-       .rx                     = &rt2400pci_queue_rx,
-       .tx                     = &rt2400pci_queue_tx,
-       .bcn                    = &rt2400pci_queue_bcn,
-       .atim                   = &rt2400pci_queue_atim,
+       .queue_init             = rt2400pci_queue_init,
        .lib                    = &rt2400pci_rt2x00_ops,
        .hw                     = &rt2400pci_mac80211_ops,
 #ifdef CONFIG_RT2X00_LIB_DEBUGFS
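
The rt2500pci and rt2500usb conversions below follow the same pattern.  The
core-side caller is not part of this section; presumably it invokes the new
callback once per allocated queue, roughly along these lines (a sketch under
that assumption, not code from the patch):

	static void rt2x00queue_init_sketch(struct rt2x00_dev *rt2x00dev,
					    struct data_queue *queue,
					    enum data_queue_qid qid)
	{
		queue->rt2x00dev = rt2x00dev;
		queue->qid = qid;

		/* Driver fills in limit, data_size, desc_size and priv_size,
		 * replacing the old static data_queue_desc templates.
		 */
		rt2x00dev->ops->queue_init(queue);
	}
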
index 77e45b223d158348b958a7b5a88b84cecfc6f3a2..0ac5c589ddceaffe7dd0f7d82ce7a508e37e4bd7 100644 (file)
@@ -2056,33 +2056,45 @@ static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = {
        .config                 = rt2500pci_config,
 };
 
-static const struct data_queue_desc rt2500pci_queue_rx = {
-       .entry_num              = 32,
-       .data_size              = DATA_FRAME_SIZE,
-       .desc_size              = RXD_DESC_SIZE,
-       .priv_size              = sizeof(struct queue_entry_priv_mmio),
-};
+static void rt2500pci_queue_init(struct data_queue *queue)
+{
+       switch (queue->qid) {
+       case QID_RX:
+               queue->limit = 32;
+               queue->data_size = DATA_FRAME_SIZE;
+               queue->desc_size = RXD_DESC_SIZE;
+               queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+               break;
 
-static const struct data_queue_desc rt2500pci_queue_tx = {
-       .entry_num              = 32,
-       .data_size              = DATA_FRAME_SIZE,
-       .desc_size              = TXD_DESC_SIZE,
-       .priv_size              = sizeof(struct queue_entry_priv_mmio),
-};
+       case QID_AC_VO:
+       case QID_AC_VI:
+       case QID_AC_BE:
+       case QID_AC_BK:
+               queue->limit = 32;
+               queue->data_size = DATA_FRAME_SIZE;
+               queue->desc_size = TXD_DESC_SIZE;
+               queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+               break;
 
-static const struct data_queue_desc rt2500pci_queue_bcn = {
-       .entry_num              = 1,
-       .data_size              = MGMT_FRAME_SIZE,
-       .desc_size              = TXD_DESC_SIZE,
-       .priv_size              = sizeof(struct queue_entry_priv_mmio),
-};
+       case QID_BEACON:
+               queue->limit = 1;
+               queue->data_size = MGMT_FRAME_SIZE;
+               queue->desc_size = TXD_DESC_SIZE;
+               queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+               break;
 
-static const struct data_queue_desc rt2500pci_queue_atim = {
-       .entry_num              = 8,
-       .data_size              = DATA_FRAME_SIZE,
-       .desc_size              = TXD_DESC_SIZE,
-       .priv_size              = sizeof(struct queue_entry_priv_mmio),
-};
+       case QID_ATIM:
+               queue->limit = 8;
+               queue->data_size = DATA_FRAME_SIZE;
+               queue->desc_size = TXD_DESC_SIZE;
+               queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+               break;
+
+       default:
+               BUG();
+               break;
+       }
+}
 
 static const struct rt2x00_ops rt2500pci_ops = {
        .name                   = KBUILD_MODNAME,
@@ -2090,11 +2102,7 @@ static const struct rt2x00_ops rt2500pci_ops = {
        .eeprom_size            = EEPROM_SIZE,
        .rf_size                = RF_SIZE,
        .tx_queues              = NUM_TX_QUEUES,
-       .extra_tx_headroom      = 0,
-       .rx                     = &rt2500pci_queue_rx,
-       .tx                     = &rt2500pci_queue_tx,
-       .bcn                    = &rt2500pci_queue_bcn,
-       .atim                   = &rt2500pci_queue_atim,
+       .queue_init             = rt2500pci_queue_init,
        .lib                    = &rt2500pci_rt2x00_ops,
        .hw                     = &rt2500pci_mac80211_ops,
 #ifdef CONFIG_RT2X00_LIB_DEBUGFS
index a7f7b365eff476c7f74e33660ea3c850882413f7..85acc79f68b840c48ffe76715546fb10fac377a9 100644 (file)
@@ -1867,33 +1867,45 @@ static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = {
        .config                 = rt2500usb_config,
 };
 
-static const struct data_queue_desc rt2500usb_queue_rx = {
-       .entry_num              = 32,
-       .data_size              = DATA_FRAME_SIZE,
-       .desc_size              = RXD_DESC_SIZE,
-       .priv_size              = sizeof(struct queue_entry_priv_usb),
-};
+static void rt2500usb_queue_init(struct data_queue *queue)
+{
+       switch (queue->qid) {
+       case QID_RX:
+               queue->limit = 32;
+               queue->data_size = DATA_FRAME_SIZE;
+               queue->desc_size = RXD_DESC_SIZE;
+               queue->priv_size = sizeof(struct queue_entry_priv_usb);
+               break;
 
-static const struct data_queue_desc rt2500usb_queue_tx = {
-       .entry_num              = 32,
-       .data_size              = DATA_FRAME_SIZE,
-       .desc_size              = TXD_DESC_SIZE,
-       .priv_size              = sizeof(struct queue_entry_priv_usb),
-};
+       case QID_AC_VO:
+       case QID_AC_VI:
+       case QID_AC_BE:
+       case QID_AC_BK:
+               queue->limit = 32;
+               queue->data_size = DATA_FRAME_SIZE;
+               queue->desc_size = TXD_DESC_SIZE;
+               queue->priv_size = sizeof(struct queue_entry_priv_usb);
+               break;
 
-static const struct data_queue_desc rt2500usb_queue_bcn = {
-       .entry_num              = 1,
-       .data_size              = MGMT_FRAME_SIZE,
-       .desc_size              = TXD_DESC_SIZE,
-       .priv_size              = sizeof(struct queue_entry_priv_usb_bcn),
-};
+       case QID_BEACON:
+               queue->limit = 1;
+               queue->data_size = MGMT_FRAME_SIZE;
+               queue->desc_size = TXD_DESC_SIZE;
+               queue->priv_size = sizeof(struct queue_entry_priv_usb_bcn);
+               break;
 
-static const struct data_queue_desc rt2500usb_queue_atim = {
-       .entry_num              = 8,
-       .data_size              = DATA_FRAME_SIZE,
-       .desc_size              = TXD_DESC_SIZE,
-       .priv_size              = sizeof(struct queue_entry_priv_usb),
-};
+       case QID_ATIM:
+               queue->limit = 8;
+               queue->data_size = DATA_FRAME_SIZE;
+               queue->desc_size = TXD_DESC_SIZE;
+               queue->priv_size = sizeof(struct queue_entry_priv_usb);
+               break;
+
+       default:
+               BUG();
+               break;
+       }
+}
 
 static const struct rt2x00_ops rt2500usb_ops = {
        .name                   = KBUILD_MODNAME,
@@ -1901,11 +1913,7 @@ static const struct rt2x00_ops rt2500usb_ops = {
        .eeprom_size            = EEPROM_SIZE,
        .rf_size                = RF_SIZE,
        .tx_queues              = NUM_TX_QUEUES,
-       .extra_tx_headroom      = TXD_DESC_SIZE,
-       .rx                     = &rt2500usb_queue_rx,
-       .tx                     = &rt2500usb_queue_tx,
-       .bcn                    = &rt2500usb_queue_bcn,
-       .atim                   = &rt2500usb_queue_atim,
+       .queue_init             = rt2500usb_queue_init,
        .lib                    = &rt2500usb_rt2x00_ops,
        .hw                     = &rt2500usb_mac80211_ops,
 #ifdef CONFIG_RT2X00_LIB_DEBUGFS
index a7630d5ec8921cb9c3024e9fcf07e0bbf1a5071c..fe43d011f0045e1f156ced6fbae44f893b8ac8ca 100644 (file)
@@ -2625,11 +2625,13 @@ struct mac_iveiv_entry {
 /*
  * DMA descriptor defines.
  */
-#define TXWI_DESC_SIZE                 (4 * sizeof(__le32))
-#define RXWI_DESC_SIZE                 (4 * sizeof(__le32))
 
-#define TXWI_DESC_SIZE_5592            (5 * sizeof(__le32))
-#define RXWI_DESC_SIZE_5592            (6 * sizeof(__le32))
+#define TXWI_DESC_SIZE_4WORDS          (4 * sizeof(__le32))
+#define TXWI_DESC_SIZE_5WORDS          (5 * sizeof(__le32))
+
+#define RXWI_DESC_SIZE_4WORDS          (4 * sizeof(__le32))
+#define RXWI_DESC_SIZE_6WORDS          (6 * sizeof(__le32))
+
 /*
  * TX WI structure
  */
index 72f32e5caa4d1931b8719891c150f1b30304425e..4072242b24528ef32327010389ef09816f6b2f47 100644 (file)
@@ -840,7 +840,7 @@ static inline void rt2800_clear_beacon_register(struct rt2x00_dev *rt2x00dev,
                                                unsigned int beacon_base)
 {
        int i;
-       const int txwi_desc_size = rt2x00dev->ops->bcn->winfo_size;
+       const int txwi_desc_size = rt2x00dev->bcn->winfo_size;
 
        /*
         * For the Beacon base registers we only need to clear
@@ -3960,379 +3960,577 @@ static void rt2800_init_bbp_early(struct rt2x00_dev *rt2x00dev)
        rt2800_bbp_write(rt2x00dev, 106, 0x35);
 }
 
-static void rt2800_init_bbp_5592(struct rt2x00_dev *rt2x00dev)
+static void rt2800_disable_unused_dac_adc(struct rt2x00_dev *rt2x00dev)
 {
-       int ant, div_mode;
        u16 eeprom;
        u8 value;
 
-       rt2800_init_bbp_early(rt2x00dev);
+       rt2800_bbp_read(rt2x00dev, 138, &value);
+       rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
+       if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1)
+               value |= 0x20;
+       if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
+               value &= ~0x02;
+       rt2800_bbp_write(rt2x00dev, 138, value);
+}
 
-       rt2800_bbp_read(rt2x00dev, 105, &value);
-       rt2x00_set_field8(&value, BBP105_MLD,
-                         rt2x00dev->default_ant.rx_chain_num == 2);
-       rt2800_bbp_write(rt2x00dev, 105, value);
+static void rt2800_init_bbp_305x_soc(struct rt2x00_dev *rt2x00dev)
+{
+       rt2800_bbp_write(rt2x00dev, 31, 0x08);
+
+       rt2800_bbp_write(rt2x00dev, 65, 0x2c);
+       rt2800_bbp_write(rt2x00dev, 66, 0x38);
+
+       rt2800_bbp_write(rt2x00dev, 69, 0x12);
+       rt2800_bbp_write(rt2x00dev, 73, 0x10);
+
+       rt2800_bbp_write(rt2x00dev, 70, 0x0a);
+
+       rt2800_bbp_write(rt2x00dev, 78, 0x0e);
+       rt2800_bbp_write(rt2x00dev, 80, 0x08);
+
+       rt2800_bbp_write(rt2x00dev, 82, 0x62);
+
+       rt2800_bbp_write(rt2x00dev, 83, 0x6a);
+
+       rt2800_bbp_write(rt2x00dev, 84, 0x99);
+
+       rt2800_bbp_write(rt2x00dev, 86, 0x00);
+
+       rt2800_bbp_write(rt2x00dev, 91, 0x04);
+
+       rt2800_bbp_write(rt2x00dev, 92, 0x00);
+
+       rt2800_bbp_write(rt2x00dev, 103, 0xc0);
+
+       rt2800_bbp_write(rt2x00dev, 105, 0x01);
+
+       rt2800_bbp_write(rt2x00dev, 106, 0x35);
+}
+
+static void rt2800_init_bbp_28xx(struct rt2x00_dev *rt2x00dev)
+{
+       rt2800_bbp_write(rt2x00dev, 65, 0x2c);
+       rt2800_bbp_write(rt2x00dev, 66, 0x38);
+
+       if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) {
+               rt2800_bbp_write(rt2x00dev, 69, 0x16);
+               rt2800_bbp_write(rt2x00dev, 73, 0x12);
+       } else {
+               rt2800_bbp_write(rt2x00dev, 69, 0x12);
+               rt2800_bbp_write(rt2x00dev, 73, 0x10);
+       }
+
+       rt2800_bbp_write(rt2x00dev, 70, 0x0a);
+
+       rt2800_bbp_write(rt2x00dev, 81, 0x37);
+
+       rt2800_bbp_write(rt2x00dev, 82, 0x62);
+
+       rt2800_bbp_write(rt2x00dev, 83, 0x6a);
+
+       if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D))
+               rt2800_bbp_write(rt2x00dev, 84, 0x19);
+       else
+               rt2800_bbp_write(rt2x00dev, 84, 0x99);
+
+       rt2800_bbp_write(rt2x00dev, 86, 0x00);
+
+       rt2800_bbp_write(rt2x00dev, 91, 0x04);
+
+       rt2800_bbp_write(rt2x00dev, 92, 0x00);
+
+       rt2800_bbp_write(rt2x00dev, 103, 0x00);
+
+       rt2800_bbp_write(rt2x00dev, 105, 0x05);
+
+       rt2800_bbp_write(rt2x00dev, 106, 0x35);
+}
+
+static void rt2800_init_bbp_30xx(struct rt2x00_dev *rt2x00dev)
+{
+       rt2800_bbp_write(rt2x00dev, 65, 0x2c);
+       rt2800_bbp_write(rt2x00dev, 66, 0x38);
+
+       rt2800_bbp_write(rt2x00dev, 69, 0x12);
+       rt2800_bbp_write(rt2x00dev, 73, 0x10);
+
+       rt2800_bbp_write(rt2x00dev, 70, 0x0a);
+
+       rt2800_bbp_write(rt2x00dev, 79, 0x13);
+       rt2800_bbp_write(rt2x00dev, 80, 0x05);
+       rt2800_bbp_write(rt2x00dev, 81, 0x33);
+
+       rt2800_bbp_write(rt2x00dev, 82, 0x62);
+
+       rt2800_bbp_write(rt2x00dev, 83, 0x6a);
+
+       rt2800_bbp_write(rt2x00dev, 84, 0x99);
+
+       rt2800_bbp_write(rt2x00dev, 86, 0x00);
+
+       rt2800_bbp_write(rt2x00dev, 91, 0x04);
+
+       rt2800_bbp_write(rt2x00dev, 92, 0x00);
+
+       if (rt2x00_rt_rev_gte(rt2x00dev, RT3070, REV_RT3070F) ||
+           rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) ||
+           rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E))
+               rt2800_bbp_write(rt2x00dev, 103, 0xc0);
+       else
+               rt2800_bbp_write(rt2x00dev, 103, 0x00);
+
+       rt2800_bbp_write(rt2x00dev, 105, 0x05);
+
+       rt2800_bbp_write(rt2x00dev, 106, 0x35);
+
+       if (rt2x00_rt(rt2x00dev, RT3071) ||
+           rt2x00_rt(rt2x00dev, RT3090))
+               rt2800_disable_unused_dac_adc(rt2x00dev);
+}
+
+static void rt2800_init_bbp_3290(struct rt2x00_dev *rt2x00dev)
+{
+       u8 value;
 
        rt2800_bbp4_mac_if_ctrl(rt2x00dev);
 
-       rt2800_bbp_write(rt2x00dev, 20, 0x06);
        rt2800_bbp_write(rt2x00dev, 31, 0x08);
-       rt2800_bbp_write(rt2x00dev, 65, 0x2C);
-       rt2800_bbp_write(rt2x00dev, 68, 0xDD);
-       rt2800_bbp_write(rt2x00dev, 69, 0x1A);
-       rt2800_bbp_write(rt2x00dev, 70, 0x05);
+
+       rt2800_bbp_write(rt2x00dev, 65, 0x2c);
+       rt2800_bbp_write(rt2x00dev, 66, 0x38);
+
+       rt2800_bbp_write(rt2x00dev, 68, 0x0b);
+
+       rt2800_bbp_write(rt2x00dev, 69, 0x12);
        rt2800_bbp_write(rt2x00dev, 73, 0x13);
-       rt2800_bbp_write(rt2x00dev, 74, 0x0F);
-       rt2800_bbp_write(rt2x00dev, 75, 0x4F);
+       rt2800_bbp_write(rt2x00dev, 75, 0x46);
        rt2800_bbp_write(rt2x00dev, 76, 0x28);
+
+       rt2800_bbp_write(rt2x00dev, 77, 0x58);
+
+       rt2800_bbp_write(rt2x00dev, 70, 0x0a);
+
+       rt2800_bbp_write(rt2x00dev, 74, 0x0b);
+       rt2800_bbp_write(rt2x00dev, 79, 0x18);
+       rt2800_bbp_write(rt2x00dev, 80, 0x09);
+       rt2800_bbp_write(rt2x00dev, 81, 0x33);
+
+       rt2800_bbp_write(rt2x00dev, 82, 0x62);
+
+       rt2800_bbp_write(rt2x00dev, 83, 0x7a);
+
+       rt2800_bbp_write(rt2x00dev, 84, 0x9a);
+
+       rt2800_bbp_write(rt2x00dev, 86, 0x38);
+
+       rt2800_bbp_write(rt2x00dev, 91, 0x04);
+
+       rt2800_bbp_write(rt2x00dev, 92, 0x02);
+
+       rt2800_bbp_write(rt2x00dev, 103, 0xc0);
+
+       rt2800_bbp_write(rt2x00dev, 104, 0x92);
+
+       rt2800_bbp_write(rt2x00dev, 105, 0x1c);
+
+       rt2800_bbp_write(rt2x00dev, 106, 0x03);
+
+       rt2800_bbp_write(rt2x00dev, 128, 0x12);
+
+       rt2800_bbp_write(rt2x00dev, 67, 0x24);
+       rt2800_bbp_write(rt2x00dev, 143, 0x04);
+       rt2800_bbp_write(rt2x00dev, 142, 0x99);
+       rt2800_bbp_write(rt2x00dev, 150, 0x30);
+       rt2800_bbp_write(rt2x00dev, 151, 0x2e);
+       rt2800_bbp_write(rt2x00dev, 152, 0x20);
+       rt2800_bbp_write(rt2x00dev, 153, 0x34);
+       rt2800_bbp_write(rt2x00dev, 154, 0x40);
+       rt2800_bbp_write(rt2x00dev, 155, 0x3b);
+       rt2800_bbp_write(rt2x00dev, 253, 0x04);
+
+       rt2800_bbp_read(rt2x00dev, 47, &value);
+       rt2x00_set_field8(&value, BBP47_TSSI_ADC6, 1);
+       rt2800_bbp_write(rt2x00dev, 47, value);
+
+       /* Use 5-bit ADC for Acquisition and 8-bit ADC for data */
+       rt2800_bbp_read(rt2x00dev, 3, &value);
+       rt2x00_set_field8(&value, BBP3_ADC_MODE_SWITCH, 1);
+       rt2x00_set_field8(&value, BBP3_ADC_INIT_MODE, 1);
+       rt2800_bbp_write(rt2x00dev, 3, value);
+}
+
+static void rt2800_init_bbp_3352(struct rt2x00_dev *rt2x00dev)
+{
+       rt2800_bbp_write(rt2x00dev, 3, 0x00);
+       rt2800_bbp_write(rt2x00dev, 4, 0x50);
+
+       rt2800_bbp_write(rt2x00dev, 31, 0x08);
+
+       rt2800_bbp_write(rt2x00dev, 47, 0x48);
+
+       rt2800_bbp_write(rt2x00dev, 65, 0x2c);
+       rt2800_bbp_write(rt2x00dev, 66, 0x38);
+
+       rt2800_bbp_write(rt2x00dev, 68, 0x0b);
+
+       rt2800_bbp_write(rt2x00dev, 69, 0x12);
+       rt2800_bbp_write(rt2x00dev, 73, 0x13);
+       rt2800_bbp_write(rt2x00dev, 75, 0x46);
+       rt2800_bbp_write(rt2x00dev, 76, 0x28);
+
        rt2800_bbp_write(rt2x00dev, 77, 0x59);
-       rt2800_bbp_write(rt2x00dev, 84, 0x9A);
+
+       rt2800_bbp_write(rt2x00dev, 70, 0x0a);
+
+       rt2800_bbp_write(rt2x00dev, 78, 0x0e);
+       rt2800_bbp_write(rt2x00dev, 80, 0x08);
+       rt2800_bbp_write(rt2x00dev, 81, 0x37);
+
+       rt2800_bbp_write(rt2x00dev, 82, 0x62);
+
+       rt2800_bbp_write(rt2x00dev, 83, 0x6a);
+
+       rt2800_bbp_write(rt2x00dev, 84, 0x99);
+
        rt2800_bbp_write(rt2x00dev, 86, 0x38);
+
        rt2800_bbp_write(rt2x00dev, 88, 0x90);
+
        rt2800_bbp_write(rt2x00dev, 91, 0x04);
+
        rt2800_bbp_write(rt2x00dev, 92, 0x02);
-       rt2800_bbp_write(rt2x00dev, 95, 0x9a);
-       rt2800_bbp_write(rt2x00dev, 98, 0x12);
-       rt2800_bbp_write(rt2x00dev, 103, 0xC0);
+
+       rt2800_bbp_write(rt2x00dev, 103, 0xc0);
+
        rt2800_bbp_write(rt2x00dev, 104, 0x92);
-       /* FIXME BBP105 owerwrite */
-       rt2800_bbp_write(rt2x00dev, 105, 0x3C);
-       rt2800_bbp_write(rt2x00dev, 106, 0x35);
-       rt2800_bbp_write(rt2x00dev, 128, 0x12);
-       rt2800_bbp_write(rt2x00dev, 134, 0xD0);
-       rt2800_bbp_write(rt2x00dev, 135, 0xF6);
-       rt2800_bbp_write(rt2x00dev, 137, 0x0F);
 
-       /* Initialize GLRT (Generalized Likehood Radio Test) */
-       rt2800_init_bbp_5592_glrt(rt2x00dev);
+       rt2800_bbp_write(rt2x00dev, 105, 0x34);
+
+       rt2800_bbp_write(rt2x00dev, 106, 0x05);
+
+       rt2800_bbp_write(rt2x00dev, 120, 0x50);
+
+       rt2800_bbp_write(rt2x00dev, 137, 0x0f);
+
+       rt2800_bbp_write(rt2x00dev, 163, 0xbd);
+       /* Set ITxBF timeout to 0x9c40=1000msec */
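+       /* The writes below program the low byte (0x40) and high byte (0x9c)
+        * of 0x9c40 through the BBP 179/180/182 indirect access sequence. */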
+       rt2800_bbp_write(rt2x00dev, 179, 0x02);
+       rt2800_bbp_write(rt2x00dev, 180, 0x00);
+       rt2800_bbp_write(rt2x00dev, 182, 0x40);
+       rt2800_bbp_write(rt2x00dev, 180, 0x01);
+       rt2800_bbp_write(rt2x00dev, 182, 0x9c);
+       rt2800_bbp_write(rt2x00dev, 179, 0x00);
+       /* Reprogram the inband interface to put right values in RXWI */
+       rt2800_bbp_write(rt2x00dev, 142, 0x04);
+       rt2800_bbp_write(rt2x00dev, 143, 0x3b);
+       rt2800_bbp_write(rt2x00dev, 142, 0x06);
+       rt2800_bbp_write(rt2x00dev, 143, 0xa0);
+       rt2800_bbp_write(rt2x00dev, 142, 0x07);
+       rt2800_bbp_write(rt2x00dev, 143, 0xa1);
+       rt2800_bbp_write(rt2x00dev, 142, 0x08);
+       rt2800_bbp_write(rt2x00dev, 143, 0xa2);
+
+       rt2800_bbp_write(rt2x00dev, 148, 0xc8);
+}
 
-       rt2800_bbp4_mac_if_ctrl(rt2x00dev);
+static void rt2800_init_bbp_3390(struct rt2x00_dev *rt2x00dev)
+{
+       rt2800_bbp_write(rt2x00dev, 65, 0x2c);
+       rt2800_bbp_write(rt2x00dev, 66, 0x38);
 
-       rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
-       div_mode = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_ANT_DIVERSITY);
-       ant = (div_mode == 3) ? 1 : 0;
-       rt2800_bbp_read(rt2x00dev, 152, &value);
-       if (ant == 0) {
-               /* Main antenna */
-               rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1);
-       } else {
-               /* Auxiliary antenna */
-               rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0);
-       }
-       rt2800_bbp_write(rt2x00dev, 152, value);
+       rt2800_bbp_write(rt2x00dev, 69, 0x12);
+       rt2800_bbp_write(rt2x00dev, 73, 0x10);
 
-       if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C)) {
-               rt2800_bbp_read(rt2x00dev, 254, &value);
-               rt2x00_set_field8(&value, BBP254_BIT7, 1);
-               rt2800_bbp_write(rt2x00dev, 254, value);
-       }
+       rt2800_bbp_write(rt2x00dev, 70, 0x0a);
 
-       rt2800_init_freq_calibration(rt2x00dev);
+       rt2800_bbp_write(rt2x00dev, 79, 0x13);
+       rt2800_bbp_write(rt2x00dev, 80, 0x05);
+       rt2800_bbp_write(rt2x00dev, 81, 0x33);
 
-       rt2800_bbp_write(rt2x00dev, 84, 0x19);
-       if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C))
+       rt2800_bbp_write(rt2x00dev, 82, 0x62);
+
+       rt2800_bbp_write(rt2x00dev, 83, 0x6a);
+
+       rt2800_bbp_write(rt2x00dev, 84, 0x99);
+
+       rt2800_bbp_write(rt2x00dev, 86, 0x00);
+
+       rt2800_bbp_write(rt2x00dev, 91, 0x04);
+
+       rt2800_bbp_write(rt2x00dev, 92, 0x00);
+
+       if (rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E))
                rt2800_bbp_write(rt2x00dev, 103, 0xc0);
+       else
+               rt2800_bbp_write(rt2x00dev, 103, 0x00);
+
+       rt2800_bbp_write(rt2x00dev, 105, 0x05);
+
+       rt2800_bbp_write(rt2x00dev, 106, 0x35);
+
+       rt2800_disable_unused_dac_adc(rt2x00dev);
 }
 
-static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
+static void rt2800_init_bbp_3572(struct rt2x00_dev *rt2x00dev)
 {
-       unsigned int i;
-       u16 eeprom;
-       u8 reg_id;
-       u8 value;
+       rt2800_bbp_write(rt2x00dev, 31, 0x08);
 
-       if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev) ||
-                    rt2800_wait_bbp_ready(rt2x00dev)))
-               return -EACCES;
+       rt2800_bbp_write(rt2x00dev, 65, 0x2c);
+       rt2800_bbp_write(rt2x00dev, 66, 0x38);
 
-       if (rt2x00_rt(rt2x00dev, RT5592)) {
-               rt2800_init_bbp_5592(rt2x00dev);
-               return 0;
-       }
+       rt2800_bbp_write(rt2x00dev, 69, 0x12);
+       rt2800_bbp_write(rt2x00dev, 73, 0x10);
 
-       if (rt2x00_rt(rt2x00dev, RT3352)) {
-               rt2800_bbp_write(rt2x00dev, 3, 0x00);
-               rt2800_bbp_write(rt2x00dev, 4, 0x50);
-       }
+       rt2800_bbp_write(rt2x00dev, 70, 0x0a);
 
-       if (rt2x00_rt(rt2x00dev, RT3290) ||
-           rt2x00_rt(rt2x00dev, RT5390) ||
-           rt2x00_rt(rt2x00dev, RT5392))
-               rt2800_bbp4_mac_if_ctrl(rt2x00dev);
+       rt2800_bbp_write(rt2x00dev, 79, 0x13);
+       rt2800_bbp_write(rt2x00dev, 80, 0x05);
+       rt2800_bbp_write(rt2x00dev, 81, 0x33);
 
-       if (rt2800_is_305x_soc(rt2x00dev) ||
-           rt2x00_rt(rt2x00dev, RT3290) ||
-           rt2x00_rt(rt2x00dev, RT3352) ||
-           rt2x00_rt(rt2x00dev, RT3572) ||
-           rt2x00_rt(rt2x00dev, RT5390) ||
-           rt2x00_rt(rt2x00dev, RT5392))
-               rt2800_bbp_write(rt2x00dev, 31, 0x08);
+       rt2800_bbp_write(rt2x00dev, 82, 0x62);
 
-       if (rt2x00_rt(rt2x00dev, RT3352))
-               rt2800_bbp_write(rt2x00dev, 47, 0x48);
+       rt2800_bbp_write(rt2x00dev, 83, 0x6a);
+
+       rt2800_bbp_write(rt2x00dev, 84, 0x99);
+
+       rt2800_bbp_write(rt2x00dev, 86, 0x00);
+
+       rt2800_bbp_write(rt2x00dev, 91, 0x04);
+
+       rt2800_bbp_write(rt2x00dev, 92, 0x00);
+
+       rt2800_bbp_write(rt2x00dev, 103, 0xc0);
+
+       rt2800_bbp_write(rt2x00dev, 105, 0x05);
+
+       rt2800_bbp_write(rt2x00dev, 106, 0x35);
+
+       rt2800_disable_unused_dac_adc(rt2x00dev);
+}
+
+static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev)
+{
+       int ant, div_mode;
+       u16 eeprom;
+       u8 value;
+
+       rt2800_bbp4_mac_if_ctrl(rt2x00dev);
+
+       rt2800_bbp_write(rt2x00dev, 31, 0x08);
 
        rt2800_bbp_write(rt2x00dev, 65, 0x2c);
        rt2800_bbp_write(rt2x00dev, 66, 0x38);
 
-       if (rt2x00_rt(rt2x00dev, RT3290) ||
-           rt2x00_rt(rt2x00dev, RT3352) ||
-           rt2x00_rt(rt2x00dev, RT5390) ||
-           rt2x00_rt(rt2x00dev, RT5392))
-               rt2800_bbp_write(rt2x00dev, 68, 0x0b);
+       rt2800_bbp_write(rt2x00dev, 68, 0x0b);
 
-       if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) {
-               rt2800_bbp_write(rt2x00dev, 69, 0x16);
-               rt2800_bbp_write(rt2x00dev, 73, 0x12);
-       } else if (rt2x00_rt(rt2x00dev, RT3290) ||
-                  rt2x00_rt(rt2x00dev, RT3352) ||
-                  rt2x00_rt(rt2x00dev, RT5390) ||
-                  rt2x00_rt(rt2x00dev, RT5392)) {
-               rt2800_bbp_write(rt2x00dev, 69, 0x12);
-               rt2800_bbp_write(rt2x00dev, 73, 0x13);
-               rt2800_bbp_write(rt2x00dev, 75, 0x46);
-               rt2800_bbp_write(rt2x00dev, 76, 0x28);
+       rt2800_bbp_write(rt2x00dev, 69, 0x12);
+       rt2800_bbp_write(rt2x00dev, 73, 0x13);
+       rt2800_bbp_write(rt2x00dev, 75, 0x46);
+       rt2800_bbp_write(rt2x00dev, 76, 0x28);
 
-               if (rt2x00_rt(rt2x00dev, RT3290))
-                       rt2800_bbp_write(rt2x00dev, 77, 0x58);
-               else
-                       rt2800_bbp_write(rt2x00dev, 77, 0x59);
-       } else {
-               rt2800_bbp_write(rt2x00dev, 69, 0x12);
-               rt2800_bbp_write(rt2x00dev, 73, 0x10);
-       }
+       rt2800_bbp_write(rt2x00dev, 77, 0x59);
 
        rt2800_bbp_write(rt2x00dev, 70, 0x0a);
 
-       if (rt2x00_rt(rt2x00dev, RT3070) ||
-           rt2x00_rt(rt2x00dev, RT3071) ||
-           rt2x00_rt(rt2x00dev, RT3090) ||
-           rt2x00_rt(rt2x00dev, RT3390) ||
-           rt2x00_rt(rt2x00dev, RT3572) ||
-           rt2x00_rt(rt2x00dev, RT5390) ||
-           rt2x00_rt(rt2x00dev, RT5392)) {
-               rt2800_bbp_write(rt2x00dev, 79, 0x13);
-               rt2800_bbp_write(rt2x00dev, 80, 0x05);
-               rt2800_bbp_write(rt2x00dev, 81, 0x33);
-       } else if (rt2800_is_305x_soc(rt2x00dev)) {
-               rt2800_bbp_write(rt2x00dev, 78, 0x0e);
-               rt2800_bbp_write(rt2x00dev, 80, 0x08);
-       } else if (rt2x00_rt(rt2x00dev, RT3290)) {
-               rt2800_bbp_write(rt2x00dev, 74, 0x0b);
-               rt2800_bbp_write(rt2x00dev, 79, 0x18);
-               rt2800_bbp_write(rt2x00dev, 80, 0x09);
-               rt2800_bbp_write(rt2x00dev, 81, 0x33);
-       } else if (rt2x00_rt(rt2x00dev, RT3352)) {
-               rt2800_bbp_write(rt2x00dev, 78, 0x0e);
-               rt2800_bbp_write(rt2x00dev, 80, 0x08);
-               rt2800_bbp_write(rt2x00dev, 81, 0x37);
-       } else {
-               rt2800_bbp_write(rt2x00dev, 81, 0x37);
-       }
+       rt2800_bbp_write(rt2x00dev, 79, 0x13);
+       rt2800_bbp_write(rt2x00dev, 80, 0x05);
+       rt2800_bbp_write(rt2x00dev, 81, 0x33);
 
        rt2800_bbp_write(rt2x00dev, 82, 0x62);
-       if (rt2x00_rt(rt2x00dev, RT3290) ||
-           rt2x00_rt(rt2x00dev, RT5390) ||
-           rt2x00_rt(rt2x00dev, RT5392))
-               rt2800_bbp_write(rt2x00dev, 83, 0x7a);
-       else
-               rt2800_bbp_write(rt2x00dev, 83, 0x6a);
 
-       if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D))
-               rt2800_bbp_write(rt2x00dev, 84, 0x19);
-       else if (rt2x00_rt(rt2x00dev, RT3290) ||
-                rt2x00_rt(rt2x00dev, RT5390) ||
-                rt2x00_rt(rt2x00dev, RT5392))
-               rt2800_bbp_write(rt2x00dev, 84, 0x9a);
-       else
-               rt2800_bbp_write(rt2x00dev, 84, 0x99);
+       rt2800_bbp_write(rt2x00dev, 83, 0x7a);
 
-       if (rt2x00_rt(rt2x00dev, RT3290) ||
-           rt2x00_rt(rt2x00dev, RT3352) ||
-           rt2x00_rt(rt2x00dev, RT5390) ||
-           rt2x00_rt(rt2x00dev, RT5392))
-               rt2800_bbp_write(rt2x00dev, 86, 0x38);
-       else
-               rt2800_bbp_write(rt2x00dev, 86, 0x00);
+       rt2800_bbp_write(rt2x00dev, 84, 0x9a);
 
-       if (rt2x00_rt(rt2x00dev, RT3352) ||
-           rt2x00_rt(rt2x00dev, RT5392))
+       rt2800_bbp_write(rt2x00dev, 86, 0x38);
+
+       if (rt2x00_rt(rt2x00dev, RT5392))
                rt2800_bbp_write(rt2x00dev, 88, 0x90);
 
        rt2800_bbp_write(rt2x00dev, 91, 0x04);
 
-       if (rt2x00_rt(rt2x00dev, RT3290) ||
-           rt2x00_rt(rt2x00dev, RT3352) ||
-           rt2x00_rt(rt2x00dev, RT5390) ||
-           rt2x00_rt(rt2x00dev, RT5392))
-               rt2800_bbp_write(rt2x00dev, 92, 0x02);
-       else
-               rt2800_bbp_write(rt2x00dev, 92, 0x00);
+       rt2800_bbp_write(rt2x00dev, 92, 0x02);
 
        if (rt2x00_rt(rt2x00dev, RT5392)) {
                rt2800_bbp_write(rt2x00dev, 95, 0x9a);
                rt2800_bbp_write(rt2x00dev, 98, 0x12);
        }
 
-       if (rt2x00_rt_rev_gte(rt2x00dev, RT3070, REV_RT3070F) ||
-           rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) ||
-           rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) ||
-           rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) ||
-           rt2x00_rt(rt2x00dev, RT3290) ||
-           rt2x00_rt(rt2x00dev, RT3352) ||
-           rt2x00_rt(rt2x00dev, RT3572) ||
-           rt2x00_rt(rt2x00dev, RT5390) ||
-           rt2x00_rt(rt2x00dev, RT5392) ||
-           rt2800_is_305x_soc(rt2x00dev))
-               rt2800_bbp_write(rt2x00dev, 103, 0xc0);
-       else
-               rt2800_bbp_write(rt2x00dev, 103, 0x00);
+       rt2800_bbp_write(rt2x00dev, 103, 0xc0);
 
-       if (rt2x00_rt(rt2x00dev, RT3290) ||
-           rt2x00_rt(rt2x00dev, RT3352) ||
-           rt2x00_rt(rt2x00dev, RT5390) ||
-           rt2x00_rt(rt2x00dev, RT5392))
-               rt2800_bbp_write(rt2x00dev, 104, 0x92);
+       rt2800_bbp_write(rt2x00dev, 104, 0x92);
 
-       if (rt2800_is_305x_soc(rt2x00dev))
-               rt2800_bbp_write(rt2x00dev, 105, 0x01);
-       else if (rt2x00_rt(rt2x00dev, RT3290))
-               rt2800_bbp_write(rt2x00dev, 105, 0x1c);
-       else if (rt2x00_rt(rt2x00dev, RT3352))
-               rt2800_bbp_write(rt2x00dev, 105, 0x34);
-       else if (rt2x00_rt(rt2x00dev, RT5390) ||
-                rt2x00_rt(rt2x00dev, RT5392))
-               rt2800_bbp_write(rt2x00dev, 105, 0x3c);
-       else
-               rt2800_bbp_write(rt2x00dev, 105, 0x05);
+       rt2800_bbp_write(rt2x00dev, 105, 0x3c);
 
-       if (rt2x00_rt(rt2x00dev, RT3290) ||
-           rt2x00_rt(rt2x00dev, RT5390))
+       if (rt2x00_rt(rt2x00dev, RT5390))
                rt2800_bbp_write(rt2x00dev, 106, 0x03);
-       else if (rt2x00_rt(rt2x00dev, RT3352))
-               rt2800_bbp_write(rt2x00dev, 106, 0x05);
        else if (rt2x00_rt(rt2x00dev, RT5392))
                rt2800_bbp_write(rt2x00dev, 106, 0x12);
        else
-               rt2800_bbp_write(rt2x00dev, 106, 0x35);
-
-       if (rt2x00_rt(rt2x00dev, RT3352))
-               rt2800_bbp_write(rt2x00dev, 120, 0x50);
+               WARN_ON(1);
 
-       if (rt2x00_rt(rt2x00dev, RT3290) ||
-           rt2x00_rt(rt2x00dev, RT5390) ||
-           rt2x00_rt(rt2x00dev, RT5392))
-               rt2800_bbp_write(rt2x00dev, 128, 0x12);
+       rt2800_bbp_write(rt2x00dev, 128, 0x12);
 
        if (rt2x00_rt(rt2x00dev, RT5392)) {
                rt2800_bbp_write(rt2x00dev, 134, 0xd0);
                rt2800_bbp_write(rt2x00dev, 135, 0xf6);
        }
 
-       if (rt2x00_rt(rt2x00dev, RT3352))
-               rt2800_bbp_write(rt2x00dev, 137, 0x0f);
+       rt2800_disable_unused_dac_adc(rt2x00dev);
 
-       if (rt2x00_rt(rt2x00dev, RT3071) ||
-           rt2x00_rt(rt2x00dev, RT3090) ||
-           rt2x00_rt(rt2x00dev, RT3390) ||
-           rt2x00_rt(rt2x00dev, RT3572) ||
-           rt2x00_rt(rt2x00dev, RT5390) ||
-           rt2x00_rt(rt2x00dev, RT5392)) {
-               rt2800_bbp_read(rt2x00dev, 138, &value);
+       rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+       div_mode = rt2x00_get_field16(eeprom,
+                                     EEPROM_NIC_CONF1_ANT_DIVERSITY);
+       ant = (div_mode == 3) ? 1 : 0;
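+       /* div_mode 3 selects the auxiliary antenna (ant = 1); any other value
+        * keeps the main antenna (ant = 0), matching the BBP 152 default
+        * antenna selection further down. */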
 
-               rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
-               if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1)
-                       value |= 0x20;
-               if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
-                       value &= ~0x02;
+       /* check if this is a Bluetooth combo card */
+       if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
+               u32 reg;
 
-               rt2800_bbp_write(rt2x00dev, 138, value);
+               rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
+               rt2x00_set_field32(&reg, GPIO_CTRL_DIR3, 0);
+               rt2x00_set_field32(&reg, GPIO_CTRL_DIR6, 0);
+               rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 0);
+               rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 0);
+               if (ant == 0)
+                       rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 1);
+               else if (ant == 1)
+                       rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 1);
+               rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
        }
 
-       if (rt2x00_rt(rt2x00dev, RT3290)) {
-               rt2800_bbp_write(rt2x00dev, 67, 0x24);
-               rt2800_bbp_write(rt2x00dev, 143, 0x04);
-               rt2800_bbp_write(rt2x00dev, 142, 0x99);
-               rt2800_bbp_write(rt2x00dev, 150, 0x30);
-               rt2800_bbp_write(rt2x00dev, 151, 0x2e);
-               rt2800_bbp_write(rt2x00dev, 152, 0x20);
-               rt2800_bbp_write(rt2x00dev, 153, 0x34);
-               rt2800_bbp_write(rt2x00dev, 154, 0x40);
-               rt2800_bbp_write(rt2x00dev, 155, 0x3b);
-               rt2800_bbp_write(rt2x00dev, 253, 0x04);
-
-               rt2800_bbp_read(rt2x00dev, 47, &value);
-               rt2x00_set_field8(&value, BBP47_TSSI_ADC6, 1);
-               rt2800_bbp_write(rt2x00dev, 47, value);
-
-               /* Use 5-bit ADC for Acquisition and 8-bit ADC for data */
-               rt2800_bbp_read(rt2x00dev, 3, &value);
-               rt2x00_set_field8(&value, BBP3_ADC_MODE_SWITCH, 1);
-               rt2x00_set_field8(&value, BBP3_ADC_INIT_MODE, 1);
-               rt2800_bbp_write(rt2x00dev, 3, value);
+       /* This chip has hardware antenna diversity */
+       if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390R)) {
+               rt2800_bbp_write(rt2x00dev, 150, 0); /* Disable Antenna Software OFDM */
+               rt2800_bbp_write(rt2x00dev, 151, 0); /* Disable Antenna Software CCK */
+               rt2800_bbp_write(rt2x00dev, 154, 0); /* Clear previously selected antenna */
        }
 
-       if (rt2x00_rt(rt2x00dev, RT3352)) {
-               rt2800_bbp_write(rt2x00dev, 163, 0xbd);
-               /* Set ITxBF timeout to 0x9c40=1000msec */
-               rt2800_bbp_write(rt2x00dev, 179, 0x02);
-               rt2800_bbp_write(rt2x00dev, 180, 0x00);
-               rt2800_bbp_write(rt2x00dev, 182, 0x40);
-               rt2800_bbp_write(rt2x00dev, 180, 0x01);
-               rt2800_bbp_write(rt2x00dev, 182, 0x9c);
-               rt2800_bbp_write(rt2x00dev, 179, 0x00);
-               /* Reprogram the inband interface to put right values in RXWI */
-               rt2800_bbp_write(rt2x00dev, 142, 0x04);
-               rt2800_bbp_write(rt2x00dev, 143, 0x3b);
-               rt2800_bbp_write(rt2x00dev, 142, 0x06);
-               rt2800_bbp_write(rt2x00dev, 143, 0xa0);
-               rt2800_bbp_write(rt2x00dev, 142, 0x07);
-               rt2800_bbp_write(rt2x00dev, 143, 0xa1);
-               rt2800_bbp_write(rt2x00dev, 142, 0x08);
-               rt2800_bbp_write(rt2x00dev, 143, 0xa2);
-
-               rt2800_bbp_write(rt2x00dev, 148, 0xc8);
+       rt2800_bbp_read(rt2x00dev, 152, &value);
+       if (ant == 0)
+               rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1);
+       else
+               rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0);
+       rt2800_bbp_write(rt2x00dev, 152, value);
+
+       rt2800_init_freq_calibration(rt2x00dev);
+}
+
+static void rt2800_init_bbp_5592(struct rt2x00_dev *rt2x00dev)
+{
+       int ant, div_mode;
+       u16 eeprom;
+       u8 value;
+
+       rt2800_init_bbp_early(rt2x00dev);
+
+       rt2800_bbp_read(rt2x00dev, 105, &value);
+       rt2x00_set_field8(&value, BBP105_MLD,
+                         rt2x00dev->default_ant.rx_chain_num == 2);
+       rt2800_bbp_write(rt2x00dev, 105, value);
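+       /* BBP105_MLD is only set when both RX chains are present (presumably
+        * the maximum-likelihood detection mode of the 2x2 receiver). */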
+
+       rt2800_bbp4_mac_if_ctrl(rt2x00dev);
+
+       rt2800_bbp_write(rt2x00dev, 20, 0x06);
+       rt2800_bbp_write(rt2x00dev, 31, 0x08);
+       rt2800_bbp_write(rt2x00dev, 65, 0x2C);
+       rt2800_bbp_write(rt2x00dev, 68, 0xDD);
+       rt2800_bbp_write(rt2x00dev, 69, 0x1A);
+       rt2800_bbp_write(rt2x00dev, 70, 0x05);
+       rt2800_bbp_write(rt2x00dev, 73, 0x13);
+       rt2800_bbp_write(rt2x00dev, 74, 0x0F);
+       rt2800_bbp_write(rt2x00dev, 75, 0x4F);
+       rt2800_bbp_write(rt2x00dev, 76, 0x28);
+       rt2800_bbp_write(rt2x00dev, 77, 0x59);
+       rt2800_bbp_write(rt2x00dev, 84, 0x9A);
+       rt2800_bbp_write(rt2x00dev, 86, 0x38);
+       rt2800_bbp_write(rt2x00dev, 88, 0x90);
+       rt2800_bbp_write(rt2x00dev, 91, 0x04);
+       rt2800_bbp_write(rt2x00dev, 92, 0x02);
+       rt2800_bbp_write(rt2x00dev, 95, 0x9a);
+       rt2800_bbp_write(rt2x00dev, 98, 0x12);
+       rt2800_bbp_write(rt2x00dev, 103, 0xC0);
+       rt2800_bbp_write(rt2x00dev, 104, 0x92);
+       /* FIXME BBP105 overwrite */
+       rt2800_bbp_write(rt2x00dev, 105, 0x3C);
+       rt2800_bbp_write(rt2x00dev, 106, 0x35);
+       rt2800_bbp_write(rt2x00dev, 128, 0x12);
+       rt2800_bbp_write(rt2x00dev, 134, 0xD0);
+       rt2800_bbp_write(rt2x00dev, 135, 0xF6);
+       rt2800_bbp_write(rt2x00dev, 137, 0x0F);
+
+       /* Initialize GLRT (Generalized Likelihood Ratio Test) */
+       rt2800_init_bbp_5592_glrt(rt2x00dev);
+
+       rt2800_bbp4_mac_if_ctrl(rt2x00dev);
+
+       rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+       div_mode = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_ANT_DIVERSITY);
+       ant = (div_mode == 3) ? 1 : 0;
+       rt2800_bbp_read(rt2x00dev, 152, &value);
+       if (ant == 0) {
+               /* Main antenna */
+               rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1);
+       } else {
+               /* Auxiliary antenna */
+               rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0);
        }
+       rt2800_bbp_write(rt2x00dev, 152, value);
 
-       if (rt2x00_rt(rt2x00dev, RT5390) ||
-           rt2x00_rt(rt2x00dev, RT5392)) {
-               int ant, div_mode;
+       if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C)) {
+               rt2800_bbp_read(rt2x00dev, 254, &value);
+               rt2x00_set_field8(&value, BBP254_BIT7, 1);
+               rt2800_bbp_write(rt2x00dev, 254, value);
+       }
 
-               rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
-               div_mode = rt2x00_get_field16(eeprom,
-                                             EEPROM_NIC_CONF1_ANT_DIVERSITY);
-               ant = (div_mode == 3) ? 1 : 0;
+       rt2800_init_freq_calibration(rt2x00dev);
 
-               /* check if this is a Bluetooth combo card */
-               if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
-                       u32 reg;
-
-                       rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
-                       rt2x00_set_field32(&reg, GPIO_CTRL_DIR3, 0);
-                       rt2x00_set_field32(&reg, GPIO_CTRL_DIR6, 0);
-                       rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 0);
-                       rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 0);
-                       if (ant == 0)
-                               rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 1);
-                       else if (ant == 1)
-                               rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 1);
-                       rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
-               }
+       rt2800_bbp_write(rt2x00dev, 84, 0x19);
+       if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C))
+               rt2800_bbp_write(rt2x00dev, 103, 0xc0);
+}
 
-               /* This chip has hardware antenna diversity*/
-               if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390R)) {
-                       rt2800_bbp_write(rt2x00dev, 150, 0); /* Disable Antenna Software OFDM */
-                       rt2800_bbp_write(rt2x00dev, 151, 0); /* Disable Antenna Software CCK */
-                       rt2800_bbp_write(rt2x00dev, 154, 0); /* Clear previously selected antenna */
-               }
+static void rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
+{
+       unsigned int i;
+       u16 eeprom;
+       u8 reg_id;
+       u8 value;
 
-               rt2800_bbp_read(rt2x00dev, 152, &value);
-               if (ant == 0)
-                       rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1);
-               else
-                       rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0);
-               rt2800_bbp_write(rt2x00dev, 152, value);
+       if (rt2800_is_305x_soc(rt2x00dev))
+               rt2800_init_bbp_305x_soc(rt2x00dev);
 
-               rt2800_init_freq_calibration(rt2x00dev);
+       switch (rt2x00dev->chip.rt) {
+       case RT2860:
+       case RT2872:
+       case RT2883:
+               rt2800_init_bbp_28xx(rt2x00dev);
+               break;
+       case RT3070:
+       case RT3071:
+       case RT3090:
+               rt2800_init_bbp_30xx(rt2x00dev);
+               break;
+       case RT3290:
+               rt2800_init_bbp_3290(rt2x00dev);
+               break;
+       case RT3352:
+               rt2800_init_bbp_3352(rt2x00dev);
+               break;
+       case RT3390:
+               rt2800_init_bbp_3390(rt2x00dev);
+               break;
+       case RT3572:
+               rt2800_init_bbp_3572(rt2x00dev);
+               break;
+       case RT5390:
+       case RT5392:
+               rt2800_init_bbp_53xx(rt2x00dev);
+               break;
+       case RT5592:
+               rt2800_init_bbp_5592(rt2x00dev);
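+               /* Returning here skips the generic EEPROM BBP override loop
+                * that follows the switch. */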
+               return;
        }
 
        for (i = 0; i < EEPROM_BBP_SIZE; i++) {
@@ -4344,8 +4542,6 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
                        rt2800_bbp_write(rt2x00dev, reg_id, value);
                }
        }
-
-       return 0;
 }
 
 static void rt2800_led_open_drain_enable(struct rt2x00_dev *rt2x00dev)
@@ -5196,9 +5392,11 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
        }
        msleep(1);
 
-       if (unlikely(rt2800_init_bbp(rt2x00dev)))
+       if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev) ||
+                    rt2800_wait_bbp_ready(rt2x00dev)))
                return -EIO;
 
+       rt2800_init_bbp(rt2x00dev);
        rt2800_init_rfcsr(rt2x00dev);
 
        if (rt2x00_is_usb(rt2x00dev) &&
@@ -6056,8 +6254,8 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
                default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2);
 
                for (i = 14; i < spec->num_channels; i++) {
-                       info[i].default_power1 = default_power1[i];
-                       info[i].default_power2 = default_power2[i];
+                       info[i].default_power1 = default_power1[i - 14];
+                       info[i].default_power2 = default_power2[i - 14];
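+                       /* The A-band EEPROM tables are indexed from 0 while
+                        * the channel loop continues from 14, hence the
+                        * offset. */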
                }
        }
 
index 6f4a861af336cc22ac1461dceb7dc4eef7deb45d..00055627eb8de41282a8248f0ad855a87fe00c86 100644 (file)
@@ -637,6 +637,7 @@ static void rt2800pci_write_tx_desc(struct queue_entry *entry,
        struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
        __le32 *txd = entry_priv->desc;
        u32 word;
+       const unsigned int txwi_size = entry->queue->winfo_size;
 
        /*
         * The buffers pointed by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1
@@ -659,14 +660,14 @@ static void rt2800pci_write_tx_desc(struct queue_entry *entry,
                           !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
        rt2x00_set_field32(&word, TXD_W1_BURST,
                           test_bit(ENTRY_TXD_BURST, &txdesc->flags));
-       rt2x00_set_field32(&word, TXD_W1_SD_LEN0, TXWI_DESC_SIZE);
+       rt2x00_set_field32(&word, TXD_W1_SD_LEN0, txwi_size);
        rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0);
        rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0);
        rt2x00_desc_write(txd, 1, word);
 
        word = 0;
        rt2x00_set_field32(&word, TXD_W2_SD_PTR1,
-                          skbdesc->skb_dma + TXWI_DESC_SIZE);
+                          skbdesc->skb_dma + txwi_size);
        rt2x00_desc_write(txd, 2, word);
 
        word = 0;
@@ -1014,7 +1015,7 @@ static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
         * Since we have only one producer and one consumer we don't
         * need to lock the kfifo.
         */
-       for (i = 0; i < rt2x00dev->ops->tx->entry_num; i++) {
+       for (i = 0; i < rt2x00dev->tx->limit; i++) {
                rt2x00mmio_register_read(rt2x00dev, TX_STA_FIFO, &status);
 
                if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
@@ -1186,29 +1187,43 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
        .sta_remove             = rt2800_sta_remove,
 };
 
-static const struct data_queue_desc rt2800pci_queue_rx = {
-       .entry_num              = 128,
-       .data_size              = AGGREGATION_SIZE,
-       .desc_size              = RXD_DESC_SIZE,
-       .winfo_size             = RXWI_DESC_SIZE,
-       .priv_size              = sizeof(struct queue_entry_priv_mmio),
-};
+static void rt2800pci_queue_init(struct data_queue *queue)
+{
+       switch (queue->qid) {
+       case QID_RX:
+               queue->limit = 128;
+               queue->data_size = AGGREGATION_SIZE;
+               queue->desc_size = RXD_DESC_SIZE;
+               queue->winfo_size = RXWI_DESC_SIZE_4WORDS;
+               queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+               break;
 
-static const struct data_queue_desc rt2800pci_queue_tx = {
-       .entry_num              = 64,
-       .data_size              = AGGREGATION_SIZE,
-       .desc_size              = TXD_DESC_SIZE,
-       .winfo_size             = TXWI_DESC_SIZE,
-       .priv_size              = sizeof(struct queue_entry_priv_mmio),
-};
+       case QID_AC_VO:
+       case QID_AC_VI:
+       case QID_AC_BE:
+       case QID_AC_BK:
+               queue->limit = 64;
+               queue->data_size = AGGREGATION_SIZE;
+               queue->desc_size = TXD_DESC_SIZE;
+               queue->winfo_size = TXWI_DESC_SIZE_4WORDS;
+               queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+               break;
 
-static const struct data_queue_desc rt2800pci_queue_bcn = {
-       .entry_num              = 8,
-       .data_size              = 0, /* No DMA required for beacons */
-       .desc_size              = TXD_DESC_SIZE,
-       .winfo_size             = TXWI_DESC_SIZE,
-       .priv_size              = sizeof(struct queue_entry_priv_mmio),
-};
+       case QID_BEACON:
+               queue->limit = 8;
+               queue->data_size = 0; /* No DMA required for beacons */
+               queue->desc_size = TXD_DESC_SIZE;
+               queue->winfo_size = TXWI_DESC_SIZE_4WORDS;
+               queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+               break;
+
+       case QID_ATIM:
+               /* fallthrough */
+       default:
+               BUG();
+               break;
+       }
+}
 
 static const struct rt2x00_ops rt2800pci_ops = {
        .name                   = KBUILD_MODNAME,
@@ -1217,10 +1232,7 @@ static const struct rt2x00_ops rt2800pci_ops = {
        .eeprom_size            = EEPROM_SIZE,
        .rf_size                = RF_SIZE,
        .tx_queues              = NUM_TX_QUEUES,
-       .extra_tx_headroom      = TXWI_DESC_SIZE,
-       .rx                     = &rt2800pci_queue_rx,
-       .tx                     = &rt2800pci_queue_tx,
-       .bcn                    = &rt2800pci_queue_bcn,
+       .queue_init             = rt2800pci_queue_init,
        .lib                    = &rt2800pci_rt2x00_ops,
        .drv                    = &rt2800pci_rt2800_ops,
        .hw                     = &rt2800pci_mac80211_ops,
index ac854d75bd6cb33bd32f86e199b10e57b775d3ce..840833b26bfaeeda520a5ce29fba81bf839af284 100644 (file)
@@ -327,7 +327,7 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
         * this limit so reduce the number to prevent errors.
         */
        rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_LIMIT,
-                          ((rt2x00dev->ops->rx->entry_num * DATA_FRAME_SIZE)
+                          ((rt2x00dev->rx->limit * DATA_FRAME_SIZE)
                            / 1024) - 3);
        rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_EN, 1);
        rt2x00_set_field32(&reg, USB_DMA_CFG_TX_BULK_EN, 1);
@@ -849,85 +849,63 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
        .sta_remove             = rt2800_sta_remove,
 };
 
-static const struct data_queue_desc rt2800usb_queue_rx = {
-       .entry_num              = 128,
-       .data_size              = AGGREGATION_SIZE,
-       .desc_size              = RXINFO_DESC_SIZE,
-       .winfo_size             = RXWI_DESC_SIZE,
-       .priv_size              = sizeof(struct queue_entry_priv_usb),
-};
-
-static const struct data_queue_desc rt2800usb_queue_tx = {
-       .entry_num              = 16,
-       .data_size              = AGGREGATION_SIZE,
-       .desc_size              = TXINFO_DESC_SIZE,
-       .winfo_size             = TXWI_DESC_SIZE,
-       .priv_size              = sizeof(struct queue_entry_priv_usb),
-};
-
-static const struct data_queue_desc rt2800usb_queue_bcn = {
-       .entry_num              = 8,
-       .data_size              = MGMT_FRAME_SIZE,
-       .desc_size              = TXINFO_DESC_SIZE,
-       .winfo_size             = TXWI_DESC_SIZE,
-       .priv_size              = sizeof(struct queue_entry_priv_usb),
-};
+static void rt2800usb_queue_init(struct data_queue *queue)
+{
+       struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+       unsigned short txwi_size, rxwi_size;
 
-static const struct rt2x00_ops rt2800usb_ops = {
-       .name                   = KBUILD_MODNAME,
-       .drv_data_size          = sizeof(struct rt2800_drv_data),
-       .max_ap_intf            = 8,
-       .eeprom_size            = EEPROM_SIZE,
-       .rf_size                = RF_SIZE,
-       .tx_queues              = NUM_TX_QUEUES,
-       .extra_tx_headroom      = TXINFO_DESC_SIZE + TXWI_DESC_SIZE,
-       .rx                     = &rt2800usb_queue_rx,
-       .tx                     = &rt2800usb_queue_tx,
-       .bcn                    = &rt2800usb_queue_bcn,
-       .lib                    = &rt2800usb_rt2x00_ops,
-       .drv                    = &rt2800usb_rt2800_ops,
-       .hw                     = &rt2800usb_mac80211_ops,
-#ifdef CONFIG_RT2X00_LIB_DEBUGFS
-       .debugfs                = &rt2800_rt2x00debug,
-#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
-};
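+       /* RT5592 uses the larger 5-word TXWI / 6-word RXWI descriptor layout;
+        * all other USB chips keep the 4-word variants. */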
+       if (rt2x00_rt(rt2x00dev, RT5592)) {
+               txwi_size = TXWI_DESC_SIZE_5WORDS;
+               rxwi_size = RXWI_DESC_SIZE_6WORDS;
+       } else {
+               txwi_size = TXWI_DESC_SIZE_4WORDS;
+               rxwi_size = RXWI_DESC_SIZE_4WORDS;
+       }
 
-static const struct data_queue_desc rt2800usb_queue_rx_5592 = {
-       .entry_num              = 128,
-       .data_size              = AGGREGATION_SIZE,
-       .desc_size              = RXINFO_DESC_SIZE,
-       .winfo_size             = RXWI_DESC_SIZE_5592,
-       .priv_size              = sizeof(struct queue_entry_priv_usb),
-};
+       switch (queue->qid) {
+       case QID_RX:
+               queue->limit = 128;
+               queue->data_size = AGGREGATION_SIZE;
+               queue->desc_size = RXINFO_DESC_SIZE;
+               queue->winfo_size = rxwi_size;
+               queue->priv_size = sizeof(struct queue_entry_priv_usb);
+               break;
 
-static const struct data_queue_desc rt2800usb_queue_tx_5592 = {
-       .entry_num              = 16,
-       .data_size              = AGGREGATION_SIZE,
-       .desc_size              = TXINFO_DESC_SIZE,
-       .winfo_size             = TXWI_DESC_SIZE_5592,
-       .priv_size              = sizeof(struct queue_entry_priv_usb),
-};
+       case QID_AC_VO:
+       case QID_AC_VI:
+       case QID_AC_BE:
+       case QID_AC_BK:
+               queue->limit = 16;
+               queue->data_size = AGGREGATION_SIZE;
+               queue->desc_size = TXINFO_DESC_SIZE;
+               queue->winfo_size = txwi_size;
+               queue->priv_size = sizeof(struct queue_entry_priv_usb);
+               break;
 
-static const struct data_queue_desc rt2800usb_queue_bcn_5592 = {
-       .entry_num              = 8,
-       .data_size              = MGMT_FRAME_SIZE,
-       .desc_size              = TXINFO_DESC_SIZE,
-       .winfo_size             = TXWI_DESC_SIZE_5592,
-       .priv_size              = sizeof(struct queue_entry_priv_usb),
-};
+       case QID_BEACON:
+               queue->limit = 8;
+               queue->data_size = MGMT_FRAME_SIZE;
+               queue->desc_size = TXINFO_DESC_SIZE;
+               queue->winfo_size = txwi_size;
+               queue->priv_size = sizeof(struct queue_entry_priv_usb);
+               break;
 
+       case QID_ATIM:
+               /* fallthrough */
+       default:
+               BUG();
+               break;
+       }
+}
 
-static const struct rt2x00_ops rt2800usb_ops_5592 = {
+static const struct rt2x00_ops rt2800usb_ops = {
        .name                   = KBUILD_MODNAME,
        .drv_data_size          = sizeof(struct rt2800_drv_data),
        .max_ap_intf            = 8,
        .eeprom_size            = EEPROM_SIZE,
        .rf_size                = RF_SIZE,
        .tx_queues              = NUM_TX_QUEUES,
-       .extra_tx_headroom      = TXINFO_DESC_SIZE + TXWI_DESC_SIZE_5592,
-       .rx                     = &rt2800usb_queue_rx_5592,
-       .tx                     = &rt2800usb_queue_tx_5592,
-       .bcn                    = &rt2800usb_queue_bcn_5592,
+       .queue_init             = rt2800usb_queue_init,
        .lib                    = &rt2800usb_rt2x00_ops,
        .drv                    = &rt2800usb_rt2800_ops,
        .hw                     = &rt2800usb_mac80211_ops,
@@ -1248,15 +1226,15 @@ static struct usb_device_id rt2800usb_device_table[] = {
 #endif
 #ifdef CONFIG_RT2800USB_RT55XX
        /* Arcadyan */
-       { USB_DEVICE(0x043e, 0x7a32), .driver_info = 5592 },
+       { USB_DEVICE(0x043e, 0x7a32) },
        /* AVM GmbH */
-       { USB_DEVICE(0x057c, 0x8501), .driver_info = 5592 },
+       { USB_DEVICE(0x057c, 0x8501) },
        /* D-Link DWA-160-B2 */
-       { USB_DEVICE(0x2001, 0x3c1a), .driver_info = 5592 },
+       { USB_DEVICE(0x2001, 0x3c1a) },
        /* Proware */
-       { USB_DEVICE(0x043e, 0x7a13), .driver_info = 5592 },
+       { USB_DEVICE(0x043e, 0x7a13) },
        /* Ralink */
-       { USB_DEVICE(0x148f, 0x5572), .driver_info = 5592 },
+       { USB_DEVICE(0x148f, 0x5572) },
 #endif
 #ifdef CONFIG_RT2800USB_UNKNOWN
        /*
@@ -1361,9 +1339,6 @@ MODULE_LICENSE("GPL");
 static int rt2800usb_probe(struct usb_interface *usb_intf,
                           const struct usb_device_id *id)
 {
-       if (id->driver_info == 5592)
-               return rt2x00usb_probe(usb_intf, &rt2800usb_ops_5592);
-
        return rt2x00usb_probe(usb_intf, &rt2800usb_ops);
 }
 
index 7510723a8c378389b93b90001efe088118f045bb..ee3fc570b11d04b0a6531f182abb1113dcd9be93 100644 (file)
@@ -648,11 +648,7 @@ struct rt2x00_ops {
        const unsigned int eeprom_size;
        const unsigned int rf_size;
        const unsigned int tx_queues;
-       const unsigned int extra_tx_headroom;
-       const struct data_queue_desc *rx;
-       const struct data_queue_desc *tx;
-       const struct data_queue_desc *bcn;
-       const struct data_queue_desc *atim;
+       void (*queue_init)(struct data_queue *queue);
        const struct rt2x00lib_ops *lib;
        const void *drv;
        const struct ieee80211_ops *hw;
@@ -1010,6 +1006,9 @@ struct rt2x00_dev {
         */
        struct list_head bar_list;
        spinlock_t bar_list_lock;
+
+       /* Extra TX headroom required for alignment purposes. */
+       unsigned int extra_tx_headroom;
 };
 
 struct rt2x00_bar_list_entry {
index 90dc14336980c2bcc344a4c5e97d8e4033fa9d95..f03e3bba51c340e37469888ca77e8613b77412aa 100644 (file)
@@ -334,7 +334,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
        /*
         * Remove the extra tx headroom from the skb.
         */
-       skb_pull(entry->skb, rt2x00dev->ops->extra_tx_headroom);
+       skb_pull(entry->skb, rt2x00dev->extra_tx_headroom);
 
        /*
         * Signal that the TX descriptor is no longer in the skb.
@@ -1049,7 +1049,7 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
         */
        rt2x00dev->hw->extra_tx_headroom =
                max_t(unsigned int, IEEE80211_TX_STATUS_HEADROOM,
-                     rt2x00dev->ops->extra_tx_headroom);
+                     rt2x00dev->extra_tx_headroom);
 
        /*
         * Take TX headroom required for alignment into account.
@@ -1077,7 +1077,7 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
                 */
                int kfifo_size =
                        roundup_pow_of_two(rt2x00dev->ops->tx_queues *
-                                          rt2x00dev->ops->tx->entry_num *
+                                          rt2x00dev->tx->limit *
                                           sizeof(u32));
 
                status = kfifo_alloc(&rt2x00dev->txstatus_fifo, kfifo_size,
@@ -1256,6 +1256,17 @@ static inline void rt2x00lib_set_if_combinations(struct rt2x00_dev *rt2x00dev)
        rt2x00dev->hw->wiphy->n_iface_combinations = 1;
 }
 
+static unsigned int rt2x00dev_extra_tx_headroom(struct rt2x00_dev *rt2x00dev)
+{
+       if (WARN_ON(!rt2x00dev->tx))
+               return 0;
+
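+       /* USB devices prepend both the per-frame descriptor (TXINFO) and the
+        * TXWI to the skb; PCI/SoC devices only prepend the TXWI. */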
+       if (rt2x00_is_usb(rt2x00dev))
+               return rt2x00dev->tx[0].winfo_size + rt2x00dev->tx[0].desc_size;
+
+       return rt2x00dev->tx[0].winfo_size;
+}
+
 /*
  * driver allocation handlers.
  */
@@ -1300,23 +1311,6 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
        rt2x00dev->hw->wiphy->addr_mask[ETH_ALEN - 1] =
                (rt2x00dev->ops->max_ap_intf - 1);
 
-       /*
-        * Determine which operating modes are supported, all modes
-        * which require beaconing, depend on the availability of
-        * beacon entries.
-        */
-       rt2x00dev->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
-       if (rt2x00dev->ops->bcn->entry_num > 0)
-               rt2x00dev->hw->wiphy->interface_modes |=
-                   BIT(NL80211_IFTYPE_ADHOC) |
-                   BIT(NL80211_IFTYPE_AP) |
-#ifdef CONFIG_MAC80211_MESH
-                   BIT(NL80211_IFTYPE_MESH_POINT) |
-#endif
-                   BIT(NL80211_IFTYPE_WDS);
-
-       rt2x00dev->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
-
        /*
         * Initialize work.
         */
@@ -1347,6 +1341,26 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
        if (retval)
                goto exit;
 
+       /* Cache TX headroom value */
+       rt2x00dev->extra_tx_headroom = rt2x00dev_extra_tx_headroom(rt2x00dev);
+
+       /*
+        * Determine which operating modes are supported; all modes
+        * which require beaconing depend on the availability of
+        * beacon entries.
+        */
+       rt2x00dev->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+       if (rt2x00dev->bcn->limit > 0)
+               rt2x00dev->hw->wiphy->interface_modes |=
+                   BIT(NL80211_IFTYPE_ADHOC) |
+                   BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+                   BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+                   BIT(NL80211_IFTYPE_WDS);
+
+       rt2x00dev->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+
        /*
         * Initialize ieee80211 structure.
         */
index dc49e525ae5e55e9a090308b80122998e08857d8..76d95deb274be56feb9803adb5b8d198f0c35bd0 100644 (file)
@@ -105,11 +105,13 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops)
                goto exit_release_regions;
        }
 
+       pci_enable_msi(pci_dev);
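+       /* Try MSI; if the device or host does not support it the call fails
+        * quietly and the driver keeps using legacy INTx interrupts. */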
+
        hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
        if (!hw) {
                rt2x00_probe_err("Failed to allocate hardware\n");
                retval = -ENOMEM;
-               goto exit_release_regions;
+               goto exit_disable_msi;
        }
 
        pci_set_drvdata(pci_dev, hw);
@@ -150,6 +152,9 @@ exit_free_reg:
 exit_free_device:
        ieee80211_free_hw(hw);
 
+exit_disable_msi:
+       pci_disable_msi(pci_dev);
+
 exit_release_regions:
        pci_release_regions(pci_dev);
 
@@ -174,6 +179,8 @@ void rt2x00pci_remove(struct pci_dev *pci_dev)
        rt2x00pci_free_reg(rt2x00dev);
        ieee80211_free_hw(hw);
 
+       pci_disable_msi(pci_dev);
+
        /*
         * Free the PCI device data.
         */
index 2c12311467a99cd634c7b204eaccc36eb75d2aaa..6c0a91ff963c6833bb7d325cac7954c7c10b41fc 100644 (file)
@@ -542,8 +542,8 @@ static int rt2x00queue_write_tx_data(struct queue_entry *entry,
        /*
         * Add the requested extra tx headroom in front of the skb.
         */
-       skb_push(entry->skb, rt2x00dev->ops->extra_tx_headroom);
-       memset(entry->skb->data, 0, rt2x00dev->ops->extra_tx_headroom);
+       skb_push(entry->skb, rt2x00dev->extra_tx_headroom);
+       memset(entry->skb->data, 0, rt2x00dev->extra_tx_headroom);
 
        /*
         * Call the driver's write_tx_data function, if it exists.
@@ -596,7 +596,7 @@ static void rt2x00queue_bar_check(struct queue_entry *entry)
 {
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
        struct ieee80211_bar *bar = (void *) (entry->skb->data +
-                                   rt2x00dev->ops->extra_tx_headroom);
+                                   rt2x00dev->extra_tx_headroom);
        struct rt2x00_bar_list_entry *bar_entry;
 
        if (likely(!ieee80211_is_back_req(bar->frame_control)))
@@ -1161,8 +1161,7 @@ void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
        }
 }
 
-static int rt2x00queue_alloc_entries(struct data_queue *queue,
-                                    const struct data_queue_desc *qdesc)
+static int rt2x00queue_alloc_entries(struct data_queue *queue)
 {
        struct queue_entry *entries;
        unsigned int entry_size;
@@ -1170,16 +1169,10 @@ static int rt2x00queue_alloc_entries(struct data_queue *queue,
 
        rt2x00queue_reset(queue);
 
-       queue->limit = qdesc->entry_num;
-       queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
-       queue->data_size = qdesc->data_size;
-       queue->desc_size = qdesc->desc_size;
-       queue->winfo_size = qdesc->winfo_size;
-
        /*
         * Allocate all queue entries.
         */
-       entry_size = sizeof(*entries) + qdesc->priv_size;
+       entry_size = sizeof(*entries) + queue->priv_size;
        entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
        if (!entries)
                return -ENOMEM;
@@ -1195,7 +1188,7 @@ static int rt2x00queue_alloc_entries(struct data_queue *queue,
                entries[i].entry_idx = i;
                entries[i].priv_data =
                    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
-                                           sizeof(*entries), qdesc->priv_size);
+                                           sizeof(*entries), queue->priv_size);
        }
 
 #undef QUEUE_ENTRY_PRIV_OFFSET
@@ -1237,23 +1230,22 @@ int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
        struct data_queue *queue;
        int status;
 
-       status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
+       status = rt2x00queue_alloc_entries(rt2x00dev->rx);
        if (status)
                goto exit;
 
        tx_queue_for_each(rt2x00dev, queue) {
-               status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
+               status = rt2x00queue_alloc_entries(queue);
                if (status)
                        goto exit;
        }
 
-       status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
+       status = rt2x00queue_alloc_entries(rt2x00dev->bcn);
        if (status)
                goto exit;
 
        if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) {
-               status = rt2x00queue_alloc_entries(rt2x00dev->atim,
-                                                  rt2x00dev->ops->atim);
+               status = rt2x00queue_alloc_entries(rt2x00dev->atim);
                if (status)
                        goto exit;
        }
@@ -1297,6 +1289,10 @@ static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
        queue->aifs = 2;
        queue->cw_min = 5;
        queue->cw_max = 10;
+
+       rt2x00dev->ops->queue_init(queue);
+
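+       /* Roughly 10% of the queue size; this can only be computed after the
+        * driver callback above has filled in queue->limit. */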
+       queue->threshold = DIV_ROUND_UP(queue->limit, 10);
 }
 
 int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
index 4a7b34e9261bde758f560d0a077c8f7f7b1251bf..ebe11722497995a88d941acaa9749cef8fb83e39 100644 (file)
@@ -453,6 +453,7 @@ enum data_queue_flags {
  * @cw_max: The cw max value for outgoing frames (field ignored in RX queue).
  * @data_size: Maximum data size for the frames in this queue.
  * @desc_size: Hardware descriptor size for the data in this queue.
+ * @priv_size: Size of per-queue_entry private data.
  * @usb_endpoint: Device endpoint used for communication (USB only)
  * @usb_maxpacket: Max packet size for given endpoint (USB only)
  */
@@ -481,30 +482,12 @@ struct data_queue {
        unsigned short data_size;
        unsigned char  desc_size;
        unsigned char  winfo_size;
+       unsigned short priv_size;
 
        unsigned short usb_endpoint;
        unsigned short usb_maxpacket;
 };
 
-/**
- * struct data_queue_desc: Data queue description
- *
- * The information in this structure is used by drivers
- * to inform rt2x00lib about the creation of the data queue.
- *
- * @entry_num: Maximum number of entries for a queue.
- * @data_size: Maximum data size for the frames in this queue.
- * @desc_size: Hardware descriptor size for the data in this queue.
- * @priv_size: Size of per-queue_entry private data.
- */
-struct data_queue_desc {
-       unsigned short entry_num;
-       unsigned short data_size;
-       unsigned char  desc_size;
-       unsigned char  winfo_size;
-       unsigned short priv_size;
-};
-
 /**
  * queue_end - Return pointer to the last queue (HELPER MACRO).
  * @__dev: Pointer to &struct rt2x00_dev
index 0dc8180e251bf0c1ff33e917db2f94bf15d7c927..54d3ddfc988845cfa844b5c1d76c6a78ec62e4de 100644 (file)
@@ -2175,7 +2175,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
         * that the TX_STA_FIFO stack has a size of 16. We stick to our
         * tx ring size for now.
         */
-       for (i = 0; i < rt2x00dev->ops->tx->entry_num; i++) {
+       for (i = 0; i < rt2x00dev->tx->limit; i++) {
                rt2x00mmio_register_read(rt2x00dev, STA_CSR4, &reg);
                if (!rt2x00_get_field32(reg, STA_CSR4_VALID))
                        break;
@@ -2825,7 +2825,8 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
                tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START);
                for (i = 14; i < spec->num_channels; i++) {
                        info[i].max_power = MAX_TXPOWER;
-                       info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
+                       info[i].default_power1 =
+                                       TXPOWER_FROM_DEV(tx_power[i - 14]);
                }
        }
 
@@ -3025,26 +3026,40 @@ static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
        .config                 = rt61pci_config,
 };
 
-static const struct data_queue_desc rt61pci_queue_rx = {
-       .entry_num              = 32,
-       .data_size              = DATA_FRAME_SIZE,
-       .desc_size              = RXD_DESC_SIZE,
-       .priv_size              = sizeof(struct queue_entry_priv_mmio),
-};
+static void rt61pci_queue_init(struct data_queue *queue)
+{
+       switch (queue->qid) {
+       case QID_RX:
+               queue->limit = 32;
+               queue->data_size = DATA_FRAME_SIZE;
+               queue->desc_size = RXD_DESC_SIZE;
+               queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+               break;
 
-static const struct data_queue_desc rt61pci_queue_tx = {
-       .entry_num              = 32,
-       .data_size              = DATA_FRAME_SIZE,
-       .desc_size              = TXD_DESC_SIZE,
-       .priv_size              = sizeof(struct queue_entry_priv_mmio),
-};
+       case QID_AC_VO:
+       case QID_AC_VI:
+       case QID_AC_BE:
+       case QID_AC_BK:
+               queue->limit = 32;
+               queue->data_size = DATA_FRAME_SIZE;
+               queue->desc_size = TXD_DESC_SIZE;
+               queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+               break;
 
-static const struct data_queue_desc rt61pci_queue_bcn = {
-       .entry_num              = 4,
-       .data_size              = 0, /* No DMA required for beacons */
-       .desc_size              = TXINFO_SIZE,
-       .priv_size              = sizeof(struct queue_entry_priv_mmio),
-};
+       case QID_BEACON:
+               queue->limit = 4;
+               queue->data_size = 0; /* No DMA required for beacons */
+               queue->desc_size = TXINFO_SIZE;
+               queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+               break;
+
+       case QID_ATIM:
+               /* fallthrough */
+       default:
+               BUG();
+               break;
+       }
+}
 
 static const struct rt2x00_ops rt61pci_ops = {
        .name                   = KBUILD_MODNAME,
@@ -3052,10 +3067,7 @@ static const struct rt2x00_ops rt61pci_ops = {
        .eeprom_size            = EEPROM_SIZE,
        .rf_size                = RF_SIZE,
        .tx_queues              = NUM_TX_QUEUES,
-       .extra_tx_headroom      = 0,
-       .rx                     = &rt61pci_queue_rx,
-       .tx                     = &rt61pci_queue_tx,
-       .bcn                    = &rt61pci_queue_bcn,
+       .queue_init             = rt61pci_queue_init,
        .lib                    = &rt61pci_rt2x00_ops,
        .hw                     = &rt61pci_mac80211_ops,
 #ifdef CONFIG_RT2X00_LIB_DEBUGFS
index 377e09bb0b812cb9c269678a3b6f55696140e48d..1d3880e09a13eabb73e93c77b7af948ca12421f8 100644 (file)
@@ -2167,7 +2167,8 @@ static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
                tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START);
                for (i = 14; i < spec->num_channels; i++) {
                        info[i].max_power = MAX_TXPOWER;
-                       info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
+                       info[i].default_power1 =
+                                       TXPOWER_FROM_DEV(tx_power[i - 14]);
                }
        }
 
@@ -2359,26 +2360,40 @@ static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
        .config                 = rt73usb_config,
 };
 
-static const struct data_queue_desc rt73usb_queue_rx = {
-       .entry_num              = 32,
-       .data_size              = DATA_FRAME_SIZE,
-       .desc_size              = RXD_DESC_SIZE,
-       .priv_size              = sizeof(struct queue_entry_priv_usb),
-};
+static void rt73usb_queue_init(struct data_queue *queue)
+{
+       switch (queue->qid) {
+       case QID_RX:
+               queue->limit = 32;
+               queue->data_size = DATA_FRAME_SIZE;
+               queue->desc_size = RXD_DESC_SIZE;
+               queue->priv_size = sizeof(struct queue_entry_priv_usb);
+               break;
 
-static const struct data_queue_desc rt73usb_queue_tx = {
-       .entry_num              = 32,
-       .data_size              = DATA_FRAME_SIZE,
-       .desc_size              = TXD_DESC_SIZE,
-       .priv_size              = sizeof(struct queue_entry_priv_usb),
-};
+       case QID_AC_VO:
+       case QID_AC_VI:
+       case QID_AC_BE:
+       case QID_AC_BK:
+               queue->limit = 32;
+               queue->data_size = DATA_FRAME_SIZE;
+               queue->desc_size = TXD_DESC_SIZE;
+               queue->priv_size = sizeof(struct queue_entry_priv_usb);
+               break;
 
-static const struct data_queue_desc rt73usb_queue_bcn = {
-       .entry_num              = 4,
-       .data_size              = MGMT_FRAME_SIZE,
-       .desc_size              = TXINFO_SIZE,
-       .priv_size              = sizeof(struct queue_entry_priv_usb),
-};
+       case QID_BEACON:
+               queue->limit = 4;
+               queue->data_size = MGMT_FRAME_SIZE;
+               queue->desc_size = TXINFO_SIZE;
+               queue->priv_size = sizeof(struct queue_entry_priv_usb);
+               break;
+
+       case QID_ATIM:
+               /* fallthrough */
+       default:
+               BUG();
+               break;
+       }
+}
 
 static const struct rt2x00_ops rt73usb_ops = {
        .name                   = KBUILD_MODNAME,
@@ -2386,10 +2401,7 @@ static const struct rt2x00_ops rt73usb_ops = {
        .eeprom_size            = EEPROM_SIZE,
        .rf_size                = RF_SIZE,
        .tx_queues              = NUM_TX_QUEUES,
-       .extra_tx_headroom      = TXD_DESC_SIZE,
-       .rx                     = &rt73usb_queue_rx,
-       .tx                     = &rt73usb_queue_tx,
-       .bcn                    = &rt73usb_queue_bcn,
+       .queue_init             = rt73usb_queue_init,
        .lib                    = &rt73usb_rt2x00_ops,
        .hw                     = &rt73usb_mac80211_ops,
 #ifdef CONFIG_RT2X00_LIB_DEBUGFS
index 953f1a0f853238289620eaedbaf5f80bdc77d3d9..2119313a737bab21e9c304627b2acb56d2a3ab43 100644 (file)
@@ -104,7 +104,7 @@ void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
                        tx_agc[RF90_PATH_A] = 0x10101010;
                        tx_agc[RF90_PATH_B] = 0x10101010;
                } else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
-                          TXHIGHPWRLEVEL_LEVEL1) {
+                          TXHIGHPWRLEVEL_LEVEL2) {
                        tx_agc[RF90_PATH_A] = 0x00000000;
                        tx_agc[RF90_PATH_B] = 0x00000000;
                } else{
index 826f085c29dd5d9155ff0ab1e4ea2a8b2832f70c..2bd5985262171bfdfa81de5494a4ef9640d72cc5 100644 (file)
@@ -359,6 +359,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
        {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
        {RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/
        {RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/
+       {RTL_USB_DEVICE(0x2357, 0x0100, rtl92cu_hal_cfg)}, /*TP-Link WN8200ND*/
        {RTL_USB_DEVICE(0x7392, 0x7822, rtl92cu_hal_cfg)}, /*Edimax -Edimax*/
        {}
 };
index 19a765532603b06d4fa647a4b6ebb1651ae77f03..47875ba09ff8128d22375514ff601dbbf1f6af59 100644 (file)
@@ -842,7 +842,7 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
        long val_y, ele_c = 0;
        u8 ofdm_index[2];
        s8 cck_index = 0;
-       u8 ofdm_index_old[2];
+       u8 ofdm_index_old[2] = {0, 0};
        s8 cck_index_old = 0;
        u8 index;
        int i;
index e4c4cdc3eb67f6d992c29b026d98f244400e696b..d9ee2efffe5ffb1284335352eed2e13ab8e4df96 100644 (file)
@@ -251,7 +251,7 @@ static struct rtl_hal_cfg rtl8723ae_hal_cfg = {
        .bar_id = 2,
        .write_readback = true,
        .name = "rtl8723ae_pci",
-       .fw_name = "rtlwifi/rtl8723aefw.bin",
+       .fw_name = "rtlwifi/rtl8723fw.bin",
        .ops = &rtl8723ae_hal_ops,
        .mod_params = &rtl8723ae_mod_params,
        .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
@@ -353,8 +353,8 @@ MODULE_AUTHOR("Realtek WlanFAE      <wlanfae@realtek.com>");
 MODULE_AUTHOR("Larry Finger    <Larry.Finger@lwfinger.net>");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Realtek 8723E 802.11n PCI wireless");
-MODULE_FIRMWARE("rtlwifi/rtl8723aefw.bin");
-MODULE_FIRMWARE("rtlwifi/rtl8723aefw_B.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8723fw.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8723fw_B.bin");
 
 module_param_named(swenc, rtl8723ae_mod_params.sw_crypto, bool, 0444);
 module_param_named(debug, rtl8723ae_mod_params.debug, int, 0444);
index 4c67c2f9ea716a1c8d9430bcd51284867a20a4c8..c7dc6feab2ff38cd2bbbef713ca4b2c0113a4b51 100644 (file)
@@ -93,8 +93,7 @@ static void wl1251_spi_wake(struct wl1251 *wl)
        memset(&t, 0, sizeof(t));
        spi_message_init(&m);
 
-       /*
-        * Set WSPI_INIT_COMMAND
+       /* Set WSPI_INIT_COMMAND
         * the data is being send from the MSB to LSB
         */
        cmd[2] = 0xff;
@@ -262,7 +261,8 @@ static int wl1251_spi_probe(struct spi_device *spi)
        wl->if_ops = &wl1251_spi_ops;
 
        /* This is the only SPI value that we need to set here, the rest
-        * comes from the board-peripherals file */
+        * comes from the board-peripherals file
+        */
        spi->bits_per_word = 32;
 
        ret = spi_setup(spi);
@@ -329,29 +329,7 @@ static struct spi_driver wl1251_spi_driver = {
        .remove         = wl1251_spi_remove,
 };
 
-static int __init wl1251_spi_init(void)
-{
-       int ret;
-
-       ret = spi_register_driver(&wl1251_spi_driver);
-       if (ret < 0) {
-               wl1251_error("failed to register spi driver: %d", ret);
-               goto out;
-       }
-
-out:
-       return ret;
-}
-
-static void __exit wl1251_spi_exit(void)
-{
-       spi_unregister_driver(&wl1251_spi_driver);
-
-       wl1251_notice("unloaded");
-}
-
-module_init(wl1251_spi_init);
-module_exit(wl1251_spi_exit);
+module_spi_driver(wl1251_spi_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Kalle Valo <kvalo@adurom.com>");
index 9fa692d110250bde779c627e66af87e10ba4f5f2..7aa0eb848c5a2c83cc93f120f96567cd04d5d0fa 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/platform_device.h>
 #include <linux/ip.h>
 #include <linux/firmware.h>
+#include <linux/etherdevice.h>
 
 #include "../wlcore/wlcore.h"
 #include "../wlcore/debug.h"
@@ -594,8 +595,8 @@ static const struct wlcore_partition_set wl18xx_ptable[PART_TABLE_LEN] = {
                .mem3 = { .start = 0x00000000, .size  = 0x00000000 },
        },
        [PART_PHY_INIT] = {
-               .mem  = { .start = 0x80926000,
-                         .size = sizeof(struct wl18xx_mac_and_phy_params) },
+               .mem  = { .start = WL18XX_PHY_INIT_MEM_ADDR,
+                         .size  = WL18XX_PHY_INIT_MEM_SIZE },
                .reg  = { .start = 0x00000000, .size = 0x00000000 },
                .mem2 = { .start = 0x00000000, .size = 0x00000000 },
                .mem3 = { .start = 0x00000000, .size = 0x00000000 },
@@ -799,6 +800,9 @@ static int wl18xx_pre_upload(struct wl1271 *wl)
        u32 tmp;
        int ret;
 
+       BUILD_BUG_ON(sizeof(struct wl18xx_mac_and_phy_params) >
+               WL18XX_PHY_INIT_MEM_SIZE);
+
        ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
        if (ret < 0)
                goto out;
@@ -815,6 +819,35 @@ static int wl18xx_pre_upload(struct wl1271 *wl)
        wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp);
 
        ret = wlcore_read32(wl, WL18XX_SCR_PAD2, &tmp);
+       if (ret < 0)
+               goto out;
+
+       /*
+        * Workaround for FDSP code RAM corruption (needed for PG2.1
+        * and newer; for older chips it's a NOP).  Change FDSP clock
+        * settings so that it's muxed to the ATPG clock instead of
+        * its own clock.
+        */
+
+       ret = wlcore_set_partition(wl, &wl->ptable[PART_PHY_INIT]);
+       if (ret < 0)
+               goto out;
+
+       /* disable FDSP clock */
+       ret = wlcore_write32(wl, WL18XX_PHY_FPGA_SPARE_1,
+                            MEM_FDSP_CLK_120_DISABLE);
+       if (ret < 0)
+               goto out;
+
+       /* set ATPG clock toward FDSP Code RAM rather than its own clock */
+       ret = wlcore_write32(wl, WL18XX_PHY_FPGA_SPARE_1,
+                            MEM_FDSP_CODERAM_FUNC_CLK_SEL);
+       if (ret < 0)
+               goto out;
+
+       /* re-enable FDSP clock */
+       ret = wlcore_write32(wl, WL18XX_PHY_FPGA_SPARE_1,
+                            MEM_FDSP_CLK_120_ENABLE);
 
 out:
        return ret;
@@ -1286,6 +1319,16 @@ static int wl18xx_get_mac(struct wl1271 *wl)
                ((mac1 & 0xff000000) >> 24);
        wl->fuse_nic_addr = (mac1 & 0xffffff);
 
+       if (!wl->fuse_oui_addr && !wl->fuse_nic_addr) {
+               u8 mac[ETH_ALEN];
+
+               eth_random_addr(mac);
+
+               wl->fuse_oui_addr = (mac[0] << 16) + (mac[1] << 8) + mac[2];
+               wl->fuse_nic_addr = (mac[3] << 16) + (mac[4] << 8) + mac[5];
+               wl1271_warning("MAC address from fuse not available, using random locally administered addresses.");
+       }
+
        ret = wlcore_set_partition(wl, &wl->ptable[PART_DOWN]);
 
 out:
index 6306e04cd2580676e6683ae62a64cecc3578c719..05dd8bad27469389e0e5ff3cf713fab083d348d5 100644 (file)
@@ -38,6 +38,9 @@
 #define WL18XX_REG_BOOT_PART_SIZE  0x00014578
 
 #define WL18XX_PHY_INIT_MEM_ADDR   0x80926000
+#define WL18XX_PHY_END_MEM_ADDR           0x8093CA44
+#define WL18XX_PHY_INIT_MEM_SIZE \
+       (WL18XX_PHY_END_MEM_ADDR - WL18XX_PHY_INIT_MEM_ADDR)
 
 #define WL18XX_SDIO_WSPI_BASE          (WL18XX_REGISTERS_BASE)
 #define WL18XX_REG_CONFIG_BASE         (WL18XX_REGISTERS_BASE + 0x02000)
@@ -217,4 +220,16 @@ static const char * const rdl_names[] = {
        [RDL_4_SP]      = "1897 MIMO",
 };
 
+/* FPGA_SPARE_1 register - used to change the PHY ATPG clock at boot time */
+#define WL18XX_PHY_FPGA_SPARE_1                0x8093CA40
+
+/* command to disable FDSP clock */
+#define MEM_FDSP_CLK_120_DISABLE        0x80000000
+
+/* command to set ATPG clock toward FDSP Code RAM rather than its own clock */
+#define MEM_FDSP_CODERAM_FUNC_CLK_SEL  0xC0000000
+
+/* command to re-enable FDSP clock */
+#define MEM_FDSP_CLK_120_ENABLE                0x40000000
+
 #endif /* __REG_H__ */
index b21398f6c3ecd14bdf0558f7fc49becaf482eeca..4f23931d7bd56237f9e1fc623e784fa059b2340e 100644 (file)
@@ -1,5 +1,5 @@
 wlcore-objs            = main.o cmd.o io.o event.o tx.o rx.o ps.o acx.o \
-                         boot.o init.o debugfs.o scan.o
+                         boot.o init.o debugfs.o scan.o sysfs.o
 
 wlcore_spi-objs        = spi.o
 wlcore_sdio-objs       = sdio.o
index 953111a502ee83a0b8834e35cff965c0787666c5..b8db55c868c7b0027e96bf28f7174a00a308eb0f 100644 (file)
@@ -1,10 +1,9 @@
 
 /*
- * This file is part of wl1271
+ * This file is part of wlcore
  *
  * Copyright (C) 2008-2010 Nokia Corporation
- *
- * Contact: Luciano Coelho <luciano.coelho@nokia.com>
+ * Copyright (C) 2011-2013 Texas Instruments Inc.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
 
 #include <linux/module.h>
 #include <linux/firmware.h>
-#include <linux/delay.h>
-#include <linux/spi/spi.h>
-#include <linux/crc32.h>
 #include <linux/etherdevice.h>
 #include <linux/vmalloc.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
 #include <linux/wl12xx.h>
-#include <linux/sched.h>
 #include <linux/interrupt.h>
 
 #include "wlcore.h"
 #include "debug.h"
 #include "wl12xx_80211.h"
 #include "io.h"
-#include "event.h"
 #include "tx.h"
-#include "rx.h"
 #include "ps.h"
 #include "init.h"
 #include "debugfs.h"
-#include "cmd.h"
-#include "boot.h"
 #include "testmode.h"
 #include "scan.h"
 #include "hw_ops.h"
-
-#define WL1271_BOOT_RETRIES 3
+#include "sysfs.h"
 
 #define WL1271_BOOT_RETRIES 3
 
@@ -65,8 +53,7 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
 static void wlcore_op_stop_locked(struct wl1271 *wl);
 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 
-static int wl12xx_set_authorized(struct wl1271 *wl,
-                                struct wl12xx_vif *wlvif)
+static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        int ret;
 
@@ -983,7 +970,7 @@ static int wlcore_fw_wakeup(struct wl1271 *wl)
 
 static int wl1271_setup(struct wl1271 *wl)
 {
-       wl->fw_status_1 = kmalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
+       wl->fw_status_1 = kzalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
                                  sizeof(*wl->fw_status_2) +
                                  wl->fw_status_priv_len, GFP_KERNEL);
        if (!wl->fw_status_1)
@@ -993,7 +980,7 @@ static int wl1271_setup(struct wl1271 *wl)
                                (((u8 *) wl->fw_status_1) +
                                WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc));
 
-       wl->tx_res_if = kmalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
+       wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
        if (!wl->tx_res_if) {
                kfree(wl->fw_status_1);
                return -ENOMEM;
@@ -1668,8 +1655,7 @@ static int wl1271_configure_suspend(struct wl1271 *wl,
        return 0;
 }
 
-static void wl1271_configure_resume(struct wl1271 *wl,
-                                   struct wl12xx_vif *wlvif)
+static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        int ret = 0;
        bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
@@ -2603,6 +2589,7 @@ unlock:
        cancel_work_sync(&wlvif->rx_streaming_enable_work);
        cancel_work_sync(&wlvif->rx_streaming_disable_work);
        cancel_delayed_work_sync(&wlvif->connection_loss_work);
+       cancel_delayed_work_sync(&wlvif->channel_switch_work);
 
        mutex_lock(&wl->mutex);
 }
@@ -3210,14 +3197,6 @@ static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                if (ret < 0)
                        return ret;
 
-               /* the default WEP key needs to be configured at least once */
-               if (key_type == KEY_WEP) {
-                       ret = wl12xx_cmd_set_default_wep_key(wl,
-                                                       wlvif->default_key,
-                                                       wlvif->sta.hlid);
-                       if (ret < 0)
-                               return ret;
-               }
        }
 
        return 0;
@@ -3374,6 +3353,46 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
 }
 EXPORT_SYMBOL_GPL(wlcore_set_key);
 
+static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
+                                         struct ieee80211_vif *vif,
+                                         int key_idx)
+{
+       struct wl1271 *wl = hw->priv;
+       struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+       int ret;
+
+       wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
+                    key_idx);
+
+       mutex_lock(&wl->mutex);
+
+       if (unlikely(wl->state != WLCORE_STATE_ON)) {
+               ret = -EAGAIN;
+               goto out_unlock;
+       }
+
+       ret = wl1271_ps_elp_wakeup(wl);
+       if (ret < 0)
+               goto out_unlock;
+
+       wlvif->default_key = key_idx;
+
+       /* the default WEP key needs to be configured at least once */
+       if (wlvif->encryption_type == KEY_WEP) {
+               ret = wl12xx_cmd_set_default_wep_key(wl,
+                               key_idx,
+                               wlvif->sta.hlid);
+               if (ret < 0)
+                       goto out_sleep;
+       }
+
+out_sleep:
+       wl1271_ps_elp_sleep(wl);
+
+out_unlock:
+       mutex_unlock(&wl->mutex);
+}
+
 void wlcore_regdomain_config(struct wl1271 *wl)
 {
        int ret;
@@ -3782,8 +3801,7 @@ static int wlcore_set_beacon_template(struct wl1271 *wl,
        struct ieee80211_hdr *hdr;
        u32 min_rate;
        int ret;
-       int ieoffset = offsetof(struct ieee80211_mgmt,
-                               u.beacon.variable);
+       int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
        struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
        u16 tmpl_id;
 
@@ -4230,8 +4248,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
        }
 
        /* Handle new association with HT. Do this after join. */
-       if (sta_exists &&
-           (changed & BSS_CHANGED_HT)) {
+       if (sta_exists) {
                bool enabled =
                        bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
 
@@ -5368,6 +5385,7 @@ static const struct ieee80211_ops wl1271_ops = {
        .ampdu_action = wl1271_op_ampdu_action,
        .tx_frames_pending = wl1271_tx_frames_pending,
        .set_bitrate_mask = wl12xx_set_bitrate_mask,
+       .set_default_unicast_key = wl1271_op_set_default_key_idx,
        .channel_switch = wl12xx_op_channel_switch,
        .flush = wlcore_op_flush,
        .remain_on_channel = wlcore_op_remain_on_channel,
@@ -5403,151 +5421,6 @@ u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
        return idx;
 }
 
-static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
-                                              struct device_attribute *attr,
-                                              char *buf)
-{
-       struct wl1271 *wl = dev_get_drvdata(dev);
-       ssize_t len;
-
-       len = PAGE_SIZE;
-
-       mutex_lock(&wl->mutex);
-       len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n",
-                      wl->sg_enabled);
-       mutex_unlock(&wl->mutex);
-
-       return len;
-
-}
-
-static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
-                                               struct device_attribute *attr,
-                                               const char *buf, size_t count)
-{
-       struct wl1271 *wl = dev_get_drvdata(dev);
-       unsigned long res;
-       int ret;
-
-       ret = kstrtoul(buf, 10, &res);
-       if (ret < 0) {
-               wl1271_warning("incorrect value written to bt_coex_mode");
-               return count;
-       }
-
-       mutex_lock(&wl->mutex);
-
-       res = !!res;
-
-       if (res == wl->sg_enabled)
-               goto out;
-
-       wl->sg_enabled = res;
-
-       if (unlikely(wl->state != WLCORE_STATE_ON))
-               goto out;
-
-       ret = wl1271_ps_elp_wakeup(wl);
-       if (ret < 0)
-               goto out;
-
-       wl1271_acx_sg_enable(wl, wl->sg_enabled);
-       wl1271_ps_elp_sleep(wl);
-
- out:
-       mutex_unlock(&wl->mutex);
-       return count;
-}
-
-static DEVICE_ATTR(bt_coex_state, S_IRUGO | S_IWUSR,
-                  wl1271_sysfs_show_bt_coex_state,
-                  wl1271_sysfs_store_bt_coex_state);
-
-static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev,
-                                          struct device_attribute *attr,
-                                          char *buf)
-{
-       struct wl1271 *wl = dev_get_drvdata(dev);
-       ssize_t len;
-
-       len = PAGE_SIZE;
-
-       mutex_lock(&wl->mutex);
-       if (wl->hw_pg_ver >= 0)
-               len = snprintf(buf, len, "%d\n", wl->hw_pg_ver);
-       else
-               len = snprintf(buf, len, "n/a\n");
-       mutex_unlock(&wl->mutex);
-
-       return len;
-}
-
-static DEVICE_ATTR(hw_pg_ver, S_IRUGO,
-                  wl1271_sysfs_show_hw_pg_ver, NULL);
-
-static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
-                                      struct bin_attribute *bin_attr,
-                                      char *buffer, loff_t pos, size_t count)
-{
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct wl1271 *wl = dev_get_drvdata(dev);
-       ssize_t len;
-       int ret;
-
-       ret = mutex_lock_interruptible(&wl->mutex);
-       if (ret < 0)
-               return -ERESTARTSYS;
-
-       /* Let only one thread read the log at a time, blocking others */
-       while (wl->fwlog_size == 0) {
-               DEFINE_WAIT(wait);
-
-               prepare_to_wait_exclusive(&wl->fwlog_waitq,
-                                         &wait,
-                                         TASK_INTERRUPTIBLE);
-
-               if (wl->fwlog_size != 0) {
-                       finish_wait(&wl->fwlog_waitq, &wait);
-                       break;
-               }
-
-               mutex_unlock(&wl->mutex);
-
-               schedule();
-               finish_wait(&wl->fwlog_waitq, &wait);
-
-               if (signal_pending(current))
-                       return -ERESTARTSYS;
-
-               ret = mutex_lock_interruptible(&wl->mutex);
-               if (ret < 0)
-                       return -ERESTARTSYS;
-       }
-
-       /* Check if the fwlog is still valid */
-       if (wl->fwlog_size < 0) {
-               mutex_unlock(&wl->mutex);
-               return 0;
-       }
-
-       /* Seeking is not supported - old logs are not kept. Disregard pos. */
-       len = min(count, (size_t)wl->fwlog_size);
-       wl->fwlog_size -= len;
-       memcpy(buffer, wl->fwlog, len);
-
-       /* Make room for new messages */
-       memmove(wl->fwlog, wl->fwlog + len, wl->fwlog_size);
-
-       mutex_unlock(&wl->mutex);
-
-       return len;
-}
-
-static struct bin_attribute fwlog_attr = {
-       .attr = {.name = "fwlog", .mode = S_IRUSR},
-       .read = wl1271_sysfs_read_fwlog,
-};
-
 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
 {
        int i;
@@ -5827,8 +5700,6 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
        return 0;
 }
 
-#define WL1271_DEFAULT_CHANNEL 0
-
 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
                                     u32 mbox_size)
 {
@@ -5881,7 +5752,7 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
                goto err_hw;
        }
 
-       wl->channel = WL1271_DEFAULT_CHANNEL;
+       wl->channel = 0;
        wl->rx_counter = 0;
        wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
        wl->band = IEEE80211_BAND_2GHZ;
@@ -5988,11 +5859,8 @@ int wlcore_free_hw(struct wl1271 *wl)
        wake_up_interruptible_all(&wl->fwlog_waitq);
        mutex_unlock(&wl->mutex);
 
-       device_remove_bin_file(wl->dev, &fwlog_attr);
-
-       device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
+       wlcore_sysfs_free(wl);
 
-       device_remove_file(wl->dev, &dev_attr_bt_coex_state);
        kfree(wl->buffer_32);
        kfree(wl->mbox);
        free_page((unsigned long)wl->fwlog);
@@ -6018,6 +5886,15 @@ int wlcore_free_hw(struct wl1271 *wl)
 }
 EXPORT_SYMBOL_GPL(wlcore_free_hw);
 
+#ifdef CONFIG_PM
+static const struct wiphy_wowlan_support wlcore_wowlan_support = {
+       .flags = WIPHY_WOWLAN_ANY,
+       .n_patterns = WL1271_MAX_RX_FILTERS,
+       .pattern_min_len = 1,
+       .pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
+};
+#endif
+
 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
 {
        struct wl1271 *wl = context;
@@ -6071,14 +5948,8 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context)
        if (!ret) {
                wl->irq_wake_enabled = true;
                device_init_wakeup(wl->dev, 1);
-               if (pdata->pwr_in_suspend) {
-                       wl->hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
-                       wl->hw->wiphy->wowlan.n_patterns =
-                               WL1271_MAX_RX_FILTERS;
-                       wl->hw->wiphy->wowlan.pattern_min_len = 1;
-                       wl->hw->wiphy->wowlan.pattern_max_len =
-                               WL1271_RX_FILTER_MAX_PATTERN_SIZE;
-               }
+               if (pdata->pwr_in_suspend)
+                       wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
        }
 #endif
        disable_irq(wl->irq);
@@ -6101,36 +5972,13 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context)
        if (ret)
                goto out_irq;
 
-       /* Create sysfs file to control bt coex state */
-       ret = device_create_file(wl->dev, &dev_attr_bt_coex_state);
-       if (ret < 0) {
-               wl1271_error("failed to create sysfs file bt_coex_state");
+       ret = wlcore_sysfs_init(wl);
+       if (ret)
                goto out_unreg;
-       }
-
-       /* Create sysfs file to get HW PG version */
-       ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver);
-       if (ret < 0) {
-               wl1271_error("failed to create sysfs file hw_pg_ver");
-               goto out_bt_coex_state;
-       }
-
-       /* Create sysfs file for the FW log */
-       ret = device_create_bin_file(wl->dev, &fwlog_attr);
-       if (ret < 0) {
-               wl1271_error("failed to create sysfs file fwlog");
-               goto out_hw_pg_ver;
-       }
 
        wl->initialized = true;
        goto out;
 
-out_hw_pg_ver:
-       device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
-
-out_bt_coex_state:
-       device_remove_file(wl->dev, &dev_attr_bt_coex_state);
-
 out_unreg:
        wl1271_unregister_hw(wl);
 
index 9654577efd012e0faaa7e785fe3a4f15cc1ae5ba..98066d40c2ad107943384509abcbc10b82d82019 100644 (file)
@@ -110,7 +110,7 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl)
        DECLARE_COMPLETION_ONSTACK(compl);
        unsigned long flags;
        int ret;
-       u32 start_time = jiffies;
+       unsigned long start_time = jiffies;
        bool pending = false;
 
        /*
index e26447832683254fe26c417453bae702c1e1d1d1..1b0cd98e35f187f8be9f0525e152e5960742471d 100644 (file)
@@ -434,19 +434,7 @@ static struct spi_driver wl1271_spi_driver = {
        .remove         = wl1271_remove,
 };
 
-static int __init wl1271_init(void)
-{
-       return spi_register_driver(&wl1271_spi_driver);
-}
-
-static void __exit wl1271_exit(void)
-{
-       spi_unregister_driver(&wl1271_spi_driver);
-}
-
-module_init(wl1271_init);
-module_exit(wl1271_exit);
-
+module_spi_driver(wl1271_spi_driver);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
diff --git a/drivers/net/wireless/ti/wlcore/sysfs.c b/drivers/net/wireless/ti/wlcore/sysfs.c
new file mode 100644 (file)
index 0000000..8e58349
--- /dev/null
@@ -0,0 +1,216 @@
+/*
+ * This file is part of wlcore
+ *
+ * Copyright (C) 2013 Texas Instruments Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include "wlcore.h"
+#include "debug.h"
+#include "ps.h"
+#include "sysfs.h"
+
+static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
+                                              struct device_attribute *attr,
+                                              char *buf)
+{
+       struct wl1271 *wl = dev_get_drvdata(dev);
+       ssize_t len;
+
+       len = PAGE_SIZE;
+
+       mutex_lock(&wl->mutex);
+       len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n",
+                      wl->sg_enabled);
+       mutex_unlock(&wl->mutex);
+
+       return len;
+
+}
+
+static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
+                                               struct device_attribute *attr,
+                                               const char *buf, size_t count)
+{
+       struct wl1271 *wl = dev_get_drvdata(dev);
+       unsigned long res;
+       int ret;
+
+       ret = kstrtoul(buf, 10, &res);
+       if (ret < 0) {
+               wl1271_warning("incorrect value written to bt_coex_mode");
+               return count;
+       }
+
+       mutex_lock(&wl->mutex);
+
+       res = !!res;
+
+       if (res == wl->sg_enabled)
+               goto out;
+
+       wl->sg_enabled = res;
+
+       if (unlikely(wl->state != WLCORE_STATE_ON))
+               goto out;
+
+       ret = wl1271_ps_elp_wakeup(wl);
+       if (ret < 0)
+               goto out;
+
+       wl1271_acx_sg_enable(wl, wl->sg_enabled);
+       wl1271_ps_elp_sleep(wl);
+
+ out:
+       mutex_unlock(&wl->mutex);
+       return count;
+}
+
+static DEVICE_ATTR(bt_coex_state, S_IRUGO | S_IWUSR,
+                  wl1271_sysfs_show_bt_coex_state,
+                  wl1271_sysfs_store_bt_coex_state);
+
+static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev,
+                                          struct device_attribute *attr,
+                                          char *buf)
+{
+       struct wl1271 *wl = dev_get_drvdata(dev);
+       ssize_t len;
+
+       len = PAGE_SIZE;
+
+       mutex_lock(&wl->mutex);
+       if (wl->hw_pg_ver >= 0)
+               len = snprintf(buf, len, "%d\n", wl->hw_pg_ver);
+       else
+               len = snprintf(buf, len, "n/a\n");
+       mutex_unlock(&wl->mutex);
+
+       return len;
+}
+
+static DEVICE_ATTR(hw_pg_ver, S_IRUGO,
+                  wl1271_sysfs_show_hw_pg_ver, NULL);
+
+static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
+                                      struct bin_attribute *bin_attr,
+                                      char *buffer, loff_t pos, size_t count)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct wl1271 *wl = dev_get_drvdata(dev);
+       ssize_t len;
+       int ret;
+
+       ret = mutex_lock_interruptible(&wl->mutex);
+       if (ret < 0)
+               return -ERESTARTSYS;
+
+       /* Let only one thread read the log at a time, blocking others */
+       while (wl->fwlog_size == 0) {
+               DEFINE_WAIT(wait);
+
+               prepare_to_wait_exclusive(&wl->fwlog_waitq,
+                                         &wait,
+                                         TASK_INTERRUPTIBLE);
+
+               if (wl->fwlog_size != 0) {
+                       finish_wait(&wl->fwlog_waitq, &wait);
+                       break;
+               }
+
+               mutex_unlock(&wl->mutex);
+
+               schedule();
+               finish_wait(&wl->fwlog_waitq, &wait);
+
+               if (signal_pending(current))
+                       return -ERESTARTSYS;
+
+               ret = mutex_lock_interruptible(&wl->mutex);
+               if (ret < 0)
+                       return -ERESTARTSYS;
+       }
+
+       /* Check if the fwlog is still valid */
+       if (wl->fwlog_size < 0) {
+               mutex_unlock(&wl->mutex);
+               return 0;
+       }
+
+       /* Seeking is not supported - old logs are not kept. Disregard pos. */
+       len = min(count, (size_t)wl->fwlog_size);
+       wl->fwlog_size -= len;
+       memcpy(buffer, wl->fwlog, len);
+
+       /* Make room for new messages */
+       memmove(wl->fwlog, wl->fwlog + len, wl->fwlog_size);
+
+       mutex_unlock(&wl->mutex);
+
+       return len;
+}
+
+static struct bin_attribute fwlog_attr = {
+       .attr = {.name = "fwlog", .mode = S_IRUSR},
+       .read = wl1271_sysfs_read_fwlog,
+};
+
+int wlcore_sysfs_init(struct wl1271 *wl)
+{
+       int ret;
+
+       /* Create sysfs file to control bt coex state */
+       ret = device_create_file(wl->dev, &dev_attr_bt_coex_state);
+       if (ret < 0) {
+               wl1271_error("failed to create sysfs file bt_coex_state");
+               goto out;
+       }
+
+       /* Create sysfs file to get HW PG version */
+       ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver);
+       if (ret < 0) {
+               wl1271_error("failed to create sysfs file hw_pg_ver");
+               goto out_bt_coex_state;
+       }
+
+       /* Create sysfs file for the FW log */
+       ret = device_create_bin_file(wl->dev, &fwlog_attr);
+       if (ret < 0) {
+               wl1271_error("failed to create sysfs file fwlog");
+               goto out_hw_pg_ver;
+       }
+
+       goto out;
+
+out_hw_pg_ver:
+       device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
+
+out_bt_coex_state:
+       device_remove_file(wl->dev, &dev_attr_bt_coex_state);
+
+out:
+       return ret;
+}
+
+void wlcore_sysfs_free(struct wl1271 *wl)
+{
+       device_remove_bin_file(wl->dev, &fwlog_attr);
+
+       device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
+
+       device_remove_file(wl->dev, &dev_attr_bt_coex_state);
+}
diff --git a/drivers/net/wireless/ti/wlcore/sysfs.h b/drivers/net/wireless/ti/wlcore/sysfs.h
new file mode 100644 (file)
index 0000000..c148892
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * This file is part of wlcore
+ *
+ * Copyright (C) 2013 Texas Instruments Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __SYSFS_H__
+#define __SYSFS_H__
+
+int wlcore_sysfs_init(struct wl1271 *wl);
+void wlcore_sysfs_free(struct wl1271 *wl);
+
+#endif
index 004d02e71f01a25eb3abf9496fa752f79af3df7d..7e93fe63a2c74a215b4948a91a95c179a8d78592 100644 (file)
@@ -386,7 +386,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
                         (cipher == WLAN_CIPHER_SUITE_WEP104);
 
-               if (unlikely(is_wep && wlvif->default_key != idx)) {
+               if (WARN_ON(is_wep && wlvif->default_key != idx)) {
                        ret = wl1271_set_default_wep_key(wl, wlvif, idx);
                        if (ret < 0)
                                return ret;
index 74a852e4e41f0e11fbc8c891513648af6fcf9457..b0b64ccb7d7d5d2581047bbd8ff151dabbf3492c 100644 (file)
@@ -36,6 +36,16 @@ config NFC_MEI_PHY
 
          If unsure, say N.
 
+config NFC_SIM
+       tristate "NFC hardware simulator driver"
+       help
+         This driver declares two virtual NFC devices supporting the NFC-DEP
+         protocol. An LLCP connection can be established between them, and
+         all packets sent from one device are sent back to the other, so the
+         pair acts as a loopback device.
+
+         If unsure, say N.
+
 source "drivers/nfc/pn544/Kconfig"
 source "drivers/nfc/microread/Kconfig"
 
index aa6bd657ef409cca9ebe158737df2fdce11a85f8..be7636abcb3fa2c79076f533b54ab4769251fbb3 100644 (file)
@@ -7,5 +7,6 @@ obj-$(CONFIG_NFC_MICROREAD)     += microread/
 obj-$(CONFIG_NFC_PN533)                += pn533.o
 obj-$(CONFIG_NFC_WILINK)       += nfcwilink.o
 obj-$(CONFIG_NFC_MEI_PHY)      += mei_phy.o
+obj-$(CONFIG_NFC_SIM)          += nfcsim.o
 
 ccflags-$(CONFIG_NFC_DEBUG) := -DDEBUG
index 1201bdbfb7918175bc22ae2b1741174b7fbe977c..606bf55e76ec5941bf69443f8113a38266b4c000 100644 (file)
@@ -30,7 +30,7 @@ struct mei_nfc_hdr {
        u16 req_id;
        u32 reserved;
        u16 data_size;
-} __attribute__((packed));
+} __packed;
 
 #define MEI_NFC_MAX_READ (MEI_NFC_HEADER_SIZE + MEI_NFC_MAX_HCI_PAYLOAD)
 
@@ -60,8 +60,8 @@ int nfc_mei_phy_enable(void *phy_id)
 
        r = mei_cl_enable_device(phy->device);
        if (r < 0) {
-                pr_err("MEI_PHY: Could not enable device\n");
-                return r;
+               pr_err("MEI_PHY: Could not enable device\n");
+               return r;
        }
 
        r = mei_cl_register_event_cb(phy->device, nfc_mei_event_cb, phy);
index 3420d833db170e2e308e48d3bedab2b988dba29f..cdb9f6de132a5c16b564e2a1228bbc3ae594ec2f 100644 (file)
@@ -650,7 +650,7 @@ int microread_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
 {
        struct microread_info *info;
        unsigned long quirks = 0;
-       u32 protocols, se;
+       u32 protocols;
        struct nfc_hci_init_data init_data;
        int r;
 
@@ -678,10 +678,8 @@ int microread_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
                    NFC_PROTO_ISO14443_B_MASK |
                    NFC_PROTO_NFC_DEP_MASK;
 
-       se = NFC_SE_UICC | NFC_SE_EMBEDDED;
-
        info->hdev = nfc_hci_allocate_device(&microread_hci_ops, &init_data,
-                                            quirks, protocols, se, llc_name,
+                                            quirks, protocols, llc_name,
                                             phy_headroom +
                                             MICROREAD_CMDS_HEADROOM,
                                             phy_tailroom +
diff --git a/drivers/nfc/nfcsim.c b/drivers/nfc/nfcsim.c
new file mode 100644 (file)
index 0000000..c5c30fb
--- /dev/null
@@ -0,0 +1,541 @@
+/*
+ * NFC hardware simulation driver
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/nfc.h>
+#include <net/nfc/nfc.h>
+
+#define DEV_ERR(_dev, fmt, args...) nfc_dev_err(&_dev->nfc_dev->dev, \
+                                               "%s: " fmt, __func__, ## args)
+
+#define DEV_DBG(_dev, fmt, args...) nfc_dev_dbg(&_dev->nfc_dev->dev, \
+                                               "%s: " fmt, __func__, ## args)
+
+#define NFCSIM_VERSION "0.1"
+
+#define NFCSIM_POLL_NONE       0
+#define NFCSIM_POLL_INITIATOR  1
+#define NFCSIM_POLL_TARGET     2
+#define NFCSIM_POLL_DUAL       (NFCSIM_POLL_INITIATOR | NFCSIM_POLL_TARGET)
+
+struct nfcsim {
+       struct nfc_dev *nfc_dev;
+
+       struct mutex lock;
+
+       struct delayed_work recv_work;
+
+       struct sk_buff *clone_skb;
+
+       struct delayed_work poll_work;
+       u8 polling_mode;
+       u8 curr_polling_mode;
+
+       u8 shutting_down;
+
+       u8 up;
+
+       u8 initiator;
+
+       data_exchange_cb_t cb;
+       void *cb_context;
+
+       struct nfcsim *peer_dev;
+};
+
+static struct nfcsim *dev0;
+static struct nfcsim *dev1;
+
+struct workqueue_struct *wq;
+
+static void nfcsim_cleanup_dev(struct nfcsim *dev, u8 shutdown)
+{
+       DEV_DBG(dev, "shutdown=%d", shutdown);
+
+       mutex_lock(&dev->lock);
+
+       dev->polling_mode = NFCSIM_POLL_NONE;
+       dev->shutting_down = shutdown;
+       dev->cb = NULL;
+       dev_kfree_skb(dev->clone_skb);
+       dev->clone_skb = NULL;
+
+       mutex_unlock(&dev->lock);
+
+       cancel_delayed_work_sync(&dev->poll_work);
+       cancel_delayed_work_sync(&dev->recv_work);
+}
+
+static int nfcsim_target_found(struct nfcsim *dev)
+{
+       struct nfc_target nfc_tgt;
+
+       DEV_DBG(dev, "");
+
+       memset(&nfc_tgt, 0, sizeof(struct nfc_target));
+
+       nfc_tgt.supported_protocols = NFC_PROTO_NFC_DEP_MASK;
+       nfc_targets_found(dev->nfc_dev, &nfc_tgt, 1);
+
+       return 0;
+}
+
+static int nfcsim_dev_up(struct nfc_dev *nfc_dev)
+{
+       struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
+
+       DEV_DBG(dev, "");
+
+       mutex_lock(&dev->lock);
+
+       dev->up = 1;
+
+       mutex_unlock(&dev->lock);
+
+       return 0;
+}
+
+static int nfcsim_dev_down(struct nfc_dev *nfc_dev)
+{
+       struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
+
+       DEV_DBG(dev, "");
+
+       mutex_lock(&dev->lock);
+
+       dev->up = 0;
+
+       mutex_unlock(&dev->lock);
+
+       return 0;
+}
+
+static int nfcsim_dep_link_up(struct nfc_dev *nfc_dev,
+                             struct nfc_target *target,
+                             u8 comm_mode, u8 *gb, size_t gb_len)
+{
+       int rc;
+       struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
+       struct nfcsim *peer = dev->peer_dev;
+       u8 *remote_gb;
+       size_t remote_gb_len;
+
+       DEV_DBG(dev, "target_idx: %d, comm_mode: %d\n", target->idx, comm_mode);
+
+       mutex_lock(&peer->lock);
+
+       nfc_tm_activated(peer->nfc_dev, NFC_PROTO_NFC_DEP_MASK,
+                        NFC_COMM_ACTIVE, gb, gb_len);
+
+       remote_gb = nfc_get_local_general_bytes(peer->nfc_dev, &remote_gb_len);
+       if (!remote_gb) {
+               DEV_ERR(peer, "Can't get remote general bytes");
+
+               mutex_unlock(&peer->lock);
+               return -EINVAL;
+       }
+
+       mutex_unlock(&peer->lock);
+
+       mutex_lock(&dev->lock);
+
+       rc = nfc_set_remote_general_bytes(nfc_dev, remote_gb, remote_gb_len);
+       if (rc) {
+               DEV_ERR(dev, "Can't set remote general bytes");
+               mutex_unlock(&dev->lock);
+               return rc;
+       }
+
+       rc = nfc_dep_link_is_up(nfc_dev, target->idx, NFC_COMM_ACTIVE,
+                               NFC_RF_INITIATOR);
+
+       mutex_unlock(&dev->lock);
+
+       return rc;
+}
+
+static int nfcsim_dep_link_down(struct nfc_dev *nfc_dev)
+{
+       struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
+
+       DEV_DBG(dev, "");
+
+       nfcsim_cleanup_dev(dev, 0);
+
+       return 0;
+}
+
+static int nfcsim_start_poll(struct nfc_dev *nfc_dev,
+                            u32 im_protocols, u32 tm_protocols)
+{
+       struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
+       int rc;
+
+       mutex_lock(&dev->lock);
+
+       if (dev->polling_mode != NFCSIM_POLL_NONE) {
+               DEV_ERR(dev, "Already in polling mode");
+               rc = -EBUSY;
+               goto exit;
+       }
+
+       if (im_protocols & NFC_PROTO_NFC_DEP_MASK)
+               dev->polling_mode |= NFCSIM_POLL_INITIATOR;
+
+       if (tm_protocols & NFC_PROTO_NFC_DEP_MASK)
+               dev->polling_mode |= NFCSIM_POLL_TARGET;
+
+       if (dev->polling_mode == NFCSIM_POLL_NONE) {
+               DEV_ERR(dev, "Unsupported polling mode");
+               rc = -EINVAL;
+               goto exit;
+       }
+
+       dev->initiator = 0;
+       dev->curr_polling_mode = NFCSIM_POLL_NONE;
+
+       queue_delayed_work(wq, &dev->poll_work, 0);
+
+       DEV_DBG(dev, "Start polling: im: 0x%X, tm: 0x%X", im_protocols,
+               tm_protocols);
+
+       rc = 0;
+exit:
+       mutex_unlock(&dev->lock);
+
+       return rc;
+}
+
+static void nfcsim_stop_poll(struct nfc_dev *nfc_dev)
+{
+       struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
+
+       DEV_DBG(dev, "Stop poll");
+
+       mutex_lock(&dev->lock);
+
+       dev->polling_mode = NFCSIM_POLL_NONE;
+
+       mutex_unlock(&dev->lock);
+
+       cancel_delayed_work_sync(&dev->poll_work);
+}
+
+static int nfcsim_activate_target(struct nfc_dev *nfc_dev,
+                                 struct nfc_target *target, u32 protocol)
+{
+       struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
+
+       DEV_DBG(dev, "");
+
+       return -ENOTSUPP;
+}
+
+static void nfcsim_deactivate_target(struct nfc_dev *nfc_dev,
+                                    struct nfc_target *target)
+{
+       struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
+
+       DEV_DBG(dev, "");
+}
+
+static void nfcsim_wq_recv(struct work_struct *work)
+{
+       struct nfcsim *dev = container_of(work, struct nfcsim,
+                                         recv_work.work);
+
+       mutex_lock(&dev->lock);
+
+       if (dev->shutting_down || !dev->up || !dev->clone_skb) {
+               dev_kfree_skb(dev->clone_skb);
+               goto exit;
+       }
+
+       if (dev->initiator) {
+               if (!dev->cb) {
+                       DEV_ERR(dev, "Null recv callback");
+                       dev_kfree_skb(dev->clone_skb);
+                       goto exit;
+               }
+
+               dev->cb(dev->cb_context, dev->clone_skb, 0);
+               dev->cb = NULL;
+       } else {
+               nfc_tm_data_received(dev->nfc_dev, dev->clone_skb);
+       }
+
+exit:
+       dev->clone_skb = NULL;
+
+       mutex_unlock(&dev->lock);
+}
+
+static int nfcsim_tx(struct nfc_dev *nfc_dev, struct nfc_target *target,
+                    struct sk_buff *skb, data_exchange_cb_t cb,
+                    void *cb_context)
+{
+       struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
+       struct nfcsim *peer = dev->peer_dev;
+       int err;
+
+       mutex_lock(&dev->lock);
+
+       if (dev->shutting_down || !dev->up) {
+               mutex_unlock(&dev->lock);
+               err = -ENODEV;
+               goto exit;
+       }
+
+       dev->cb = cb;
+       dev->cb_context = cb_context;
+
+       mutex_unlock(&dev->lock);
+
+       mutex_lock(&peer->lock);
+
+       peer->clone_skb = skb_clone(skb, GFP_KERNEL);
+
+       if (!peer->clone_skb) {
+               DEV_ERR(dev, "skb_clone failed");
+               mutex_unlock(&peer->lock);
+               err = -ENOMEM;
+               goto exit;
+       }
+
+       /* This simulates an arbitrary transmission delay between the two
+        * devices. If packets were exchanged immediately, we would get a
+        * non-stop flow of several tens of thousands of SYMM packets per
+        * second and a burning CPU.
+        *
+        * TODO: Add support for a sysfs entry to control this delay.
+        */
+       queue_delayed_work(wq, &peer->recv_work, msecs_to_jiffies(5));
+
+       mutex_unlock(&peer->lock);
+
+       err = 0;
+exit:
+       dev_kfree_skb(skb);
+
+       return err;
+}
+
+static int nfcsim_im_transceive(struct nfc_dev *nfc_dev,
+                               struct nfc_target *target, struct sk_buff *skb,
+                               data_exchange_cb_t cb, void *cb_context)
+{
+       return nfcsim_tx(nfc_dev, target, skb, cb, cb_context);
+}
+
+static int nfcsim_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb)
+{
+       return nfcsim_tx(nfc_dev, NULL, skb, NULL, NULL);
+}
+
+static struct nfc_ops nfcsim_nfc_ops = {
+       .dev_up = nfcsim_dev_up,
+       .dev_down = nfcsim_dev_down,
+       .dep_link_up = nfcsim_dep_link_up,
+       .dep_link_down = nfcsim_dep_link_down,
+       .start_poll = nfcsim_start_poll,
+       .stop_poll = nfcsim_stop_poll,
+       .activate_target = nfcsim_activate_target,
+       .deactivate_target = nfcsim_deactivate_target,
+       .im_transceive = nfcsim_im_transceive,
+       .tm_send = nfcsim_tm_send,
+};
+
+static void nfcsim_set_polling_mode(struct nfcsim *dev)
+{
+       if (dev->polling_mode == NFCSIM_POLL_NONE) {
+               dev->curr_polling_mode = NFCSIM_POLL_NONE;
+               return;
+       }
+
+       if (dev->curr_polling_mode == NFCSIM_POLL_NONE) {
+               if (dev->polling_mode & NFCSIM_POLL_INITIATOR)
+                       dev->curr_polling_mode = NFCSIM_POLL_INITIATOR;
+               else
+                       dev->curr_polling_mode = NFCSIM_POLL_TARGET;
+
+               return;
+       }
+
+       if (dev->polling_mode == NFCSIM_POLL_DUAL) {
+               if (dev->curr_polling_mode == NFCSIM_POLL_TARGET)
+                       dev->curr_polling_mode = NFCSIM_POLL_INITIATOR;
+               else
+                       dev->curr_polling_mode = NFCSIM_POLL_TARGET;
+       }
+}
+
+static void nfcsim_wq_poll(struct work_struct *work)
+{
+       struct nfcsim *dev = container_of(work, struct nfcsim, poll_work.work);
+       struct nfcsim *peer = dev->peer_dev;
+
+       /* These work items run on an ordered workqueue and are therefore
+        * serialized, so we can take both mutexes without deadlocking.
+        */
+       mutex_lock(&dev->lock);
+       mutex_lock(&peer->lock);
+
+       nfcsim_set_polling_mode(dev);
+
+       if (dev->curr_polling_mode == NFCSIM_POLL_NONE) {
+               DEV_DBG(dev, "Not polling");
+               goto unlock;
+       }
+
+       DEV_DBG(dev, "Polling as %s",
+               dev->curr_polling_mode == NFCSIM_POLL_INITIATOR ?
+               "initiator" : "target");
+
+       if (dev->curr_polling_mode == NFCSIM_POLL_TARGET)
+               goto sched_work;
+
+       if (peer->curr_polling_mode == NFCSIM_POLL_TARGET) {
+               peer->polling_mode = NFCSIM_POLL_NONE;
+               dev->polling_mode = NFCSIM_POLL_NONE;
+
+               dev->initiator = 1;
+
+               nfcsim_target_found(dev);
+
+               goto unlock;
+       }
+
+sched_work:
+       /* This defines the delay for an initiator to check if the other device
+        * is polling in target mode.
+        * If the device starts in dual mode polling, it switches between
+        * initiator and target at every round.
+        * Because the wq is ordered and only 1 work item is executed at a time,
+        * we'll always have one device polling as initiator and the other as
+        * target at some point, even if both are started in dual mode.
+        */
+       queue_delayed_work(wq, &dev->poll_work, msecs_to_jiffies(200));
+
+unlock:
+       mutex_unlock(&peer->lock);
+       mutex_unlock(&dev->lock);
+}
+
+static struct nfcsim *nfcsim_init_dev(void)
+{
+       struct nfcsim *dev;
+       int rc = -ENOMEM;
+
+       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+       if (dev == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       mutex_init(&dev->lock);
+
+       INIT_DELAYED_WORK(&dev->recv_work, nfcsim_wq_recv);
+       INIT_DELAYED_WORK(&dev->poll_work, nfcsim_wq_poll);
+
+       dev->nfc_dev = nfc_allocate_device(&nfcsim_nfc_ops,
+                                          NFC_PROTO_NFC_DEP_MASK,
+                                          0, 0);
+       if (!dev->nfc_dev)
+               goto error;
+
+       nfc_set_drvdata(dev->nfc_dev, dev);
+
+       rc = nfc_register_device(dev->nfc_dev);
+       if (rc)
+               goto free_nfc_dev;
+
+       return dev;
+
+free_nfc_dev:
+       nfc_free_device(dev->nfc_dev);
+
+error:
+       kfree(dev);
+
+       return ERR_PTR(rc);
+}
+
+static void nfcsim_free_device(struct nfcsim *dev)
+{
+       nfc_unregister_device(dev->nfc_dev);
+
+       nfc_free_device(dev->nfc_dev);
+
+       kfree(dev);
+}
+
+int __init nfcsim_init(void)
+{
+       int rc;
+
+       /* We need an ordered wq to ensure that poll_work items are executed
+        * one at a time.
+        */
+       wq = alloc_ordered_workqueue("nfcsim", 0);
+       if (!wq) {
+               rc = -ENOMEM;
+               goto exit;
+       }
+
+       dev0 = nfcsim_init_dev();
+       if (IS_ERR(dev0)) {
+               rc = PTR_ERR(dev0);
+               goto exit;
+       }
+
+       dev1 = nfcsim_init_dev();
+       if (IS_ERR(dev1)) {
+               kfree(dev0);
+
+               rc = PTR_ERR(dev1);
+               goto exit;
+       }
+
+       dev0->peer_dev = dev1;
+       dev1->peer_dev = dev0;
+
+       pr_debug("NFCsim " NFCSIM_VERSION " initialized\n");
+
+       rc = 0;
+exit:
+       if (rc)
+               pr_err("Failed to initialize nfcsim driver (%d)\n",
+                      rc);
+
+       return rc;
+}
+
+void __exit nfcsim_exit(void)
+{
+       nfcsim_cleanup_dev(dev0, 1);
+       nfcsim_cleanup_dev(dev1, 1);
+
+       nfcsim_free_device(dev0);
+       nfcsim_free_device(dev1);
+
+       destroy_workqueue(wq);
+}
+
+module_init(nfcsim_init);
+module_exit(nfcsim_exit);
+
+MODULE_DESCRIPTION("NFCSim driver ver " NFCSIM_VERSION);
+MODULE_VERSION(NFCSIM_VERSION);
+MODULE_LICENSE("GPL");
index 3b731acbc408fbc807ea8ffa4eef9f0dc622e97f..59f95d8fc98c887228d15e0421c69906bdd85154 100644 (file)
@@ -109,7 +109,7 @@ enum {
        NFCWILINK_FW_DOWNLOAD,
 };
 
-static int nfcwilink_send(struct sk_buff *skb);
+static int nfcwilink_send(struct nci_dev *ndev, struct sk_buff *skb);
 
 static inline struct sk_buff *nfcwilink_skb_alloc(unsigned int len, gfp_t how)
 {
@@ -156,8 +156,6 @@ static int nfcwilink_get_bts_file_name(struct nfcwilink *drv, char *file_name)
                return -ENOMEM;
        }
 
-       skb->dev = (void *)drv->ndev;
-
        cmd = (struct nci_vs_nfcc_info_cmd *)
                        skb_put(skb, sizeof(struct nci_vs_nfcc_info_cmd));
        cmd->gid = NCI_VS_NFCC_INFO_CMD_GID;
@@ -166,7 +164,7 @@ static int nfcwilink_get_bts_file_name(struct nfcwilink *drv, char *file_name)
 
        drv->nfcc_info.plen = 0;
 
-       rc = nfcwilink_send(skb);
+       rc = nfcwilink_send(drv->ndev, skb);
        if (rc)
                return rc;
 
@@ -232,11 +230,9 @@ static int nfcwilink_send_bts_cmd(struct nfcwilink *drv, __u8 *data, int len)
                return -ENOMEM;
        }
 
-       skb->dev = (void *)drv->ndev;
-
        memcpy(skb_put(skb, len), data, len);
 
-       rc = nfcwilink_send(skb);
+       rc = nfcwilink_send(drv->ndev, skb);
        if (rc)
                return rc;
 
@@ -371,10 +367,8 @@ static long nfcwilink_receive(void *priv_data, struct sk_buff *skb)
                return 0;
        }
 
-       skb->dev = (void *) drv->ndev;
-
        /* Forward skb to NCI core layer */
-       rc = nci_recv_frame(skb);
+       rc = nci_recv_frame(drv->ndev, skb);
        if (rc < 0) {
                nfc_dev_err(&drv->pdev->dev, "nci_recv_frame failed %d", rc);
                return rc;
@@ -480,9 +474,8 @@ static int nfcwilink_close(struct nci_dev *ndev)
        return rc;
 }
 
-static int nfcwilink_send(struct sk_buff *skb)
+static int nfcwilink_send(struct nci_dev *ndev, struct sk_buff *skb)
 {
-       struct nci_dev *ndev = (struct nci_dev *)skb->dev;
        struct nfcwilink *drv = nci_get_drvdata(ndev);
        struct nfcwilink_hdr hdr = {NFCWILINK_CHNL, NFCWILINK_OPCODE, 0x0000};
        long len;
@@ -542,7 +535,6 @@ static int nfcwilink_probe(struct platform_device *pdev)
 
        drv->ndev = nci_allocate_device(&nfcwilink_ops,
                                        protocols,
-                                       NFC_SE_NONE,
                                        NFCWILINK_HDR_LEN,
                                        0);
        if (!drv->ndev) {
index 8f6f2baa930d7efea25f733f44f115dd4495b3b8..bfb4a4e7c604f02b20c5fa047dfb2010dec29c44 100644 (file)
@@ -258,7 +258,7 @@ static const struct pn533_poll_modulations poll_mod[] = {
                                .opcode = PN533_FELICA_OPC_SENSF_REQ,
                                .sc = PN533_FELICA_SENSF_SC_ALL,
                                .rc = PN533_FELICA_SENSF_RC_NO_SYSTEM_CODE,
-                               .tsn = 0,
+                               .tsn = 0x03,
                        },
                },
                .len = 7,
@@ -271,7 +271,7 @@ static const struct pn533_poll_modulations poll_mod[] = {
                                .opcode = PN533_FELICA_OPC_SENSF_REQ,
                                .sc = PN533_FELICA_SENSF_SC_ALL,
                                .rc = PN533_FELICA_SENSF_RC_NO_SYSTEM_CODE,
-                               .tsn = 0,
+                               .tsn = 0x03,
                        },
                 },
                .len = 7,
@@ -1235,7 +1235,7 @@ static int pn533_target_found_type_a(struct nfc_target *nfc_tgt, u8 *tgt_data,
 struct pn533_target_felica {
        u8 pol_res;
        u8 opcode;
-       u8 nfcid2[8];
+       u8 nfcid2[NFC_NFCID2_MAXSIZE];
        u8 pad[8];
        /* optional */
        u8 syst_code[];
@@ -1275,6 +1275,9 @@ static int pn533_target_found_felica(struct nfc_target *nfc_tgt, u8 *tgt_data,
        memcpy(nfc_tgt->sensf_res, &tgt_felica->opcode, 9);
        nfc_tgt->sensf_res_len = 9;
 
+       memcpy(nfc_tgt->nfcid2, tgt_felica->nfcid2, NFC_NFCID2_MAXSIZE);
+       nfc_tgt->nfcid2_len = NFC_NFCID2_MAXSIZE;
+
        return 0;
 }
 
@@ -2084,6 +2087,9 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
        if (comm_mode == NFC_COMM_PASSIVE)
                skb_len += PASSIVE_DATA_LEN;
 
+       if (target && target->nfcid2_len)
+               skb_len += NFC_NFCID3_MAXSIZE;
+
        skb = pn533_alloc_skb(dev, skb_len);
        if (!skb)
                return -ENOMEM;
@@ -2100,6 +2106,12 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
                *next |= 1;
        }
 
+       if (target && target->nfcid2_len) {
+               memcpy(skb_put(skb, NFC_NFCID3_MAXSIZE), target->nfcid2,
+                      target->nfcid2_len);
+               *next |= 2;
+       }
+
        if (gb != NULL && gb_len > 0) {
                memcpy(skb_put(skb, gb_len), gb, gb_len);
                *next |= 4; /* We have some Gi */
@@ -2489,7 +2501,7 @@ static void pn533_acr122_poweron_rdr_resp(struct urb *urb)
 
        nfc_dev_dbg(&urb->dev->dev, "%s", __func__);
 
-       print_hex_dump(KERN_ERR, "ACR122 RX: ", DUMP_PREFIX_NONE, 16, 1,
+       print_hex_dump_debug("ACR122 RX: ", DUMP_PREFIX_NONE, 16, 1,
                       urb->transfer_buffer, urb->transfer_buffer_length,
                       false);
 
@@ -2520,7 +2532,7 @@ static int pn533_acr122_poweron_rdr(struct pn533 *dev)
        dev->out_urb->transfer_buffer = cmd;
        dev->out_urb->transfer_buffer_length = sizeof(cmd);
 
-       print_hex_dump(KERN_ERR, "ACR122 TX: ", DUMP_PREFIX_NONE, 16, 1,
+       print_hex_dump_debug("ACR122 TX: ", DUMP_PREFIX_NONE, 16, 1,
                       cmd, sizeof(cmd), false);
 
        rc = usb_submit_urb(dev->out_urb, GFP_KERNEL);
@@ -2774,17 +2786,18 @@ static int pn533_probe(struct usb_interface *interface,
                goto destroy_wq;
 
        nfc_dev_info(&dev->interface->dev,
-                    "NXP PN533 firmware ver %d.%d now attached",
-                    fw_ver.ver, fw_ver.rev);
+                    "NXP PN5%02X firmware ver %d.%d now attached",
+                    fw_ver.ic, fw_ver.ver, fw_ver.rev);
 
 
        dev->nfc_dev = nfc_allocate_device(&pn533_nfc_ops, protocols,
-                                          NFC_SE_NONE,
                                           dev->ops->tx_header_len +
                                           PN533_CMD_DATAEXCH_HEAD_LEN,
                                           dev->ops->tx_tail_len);
-       if (!dev->nfc_dev)
+       if (!dev->nfc_dev) {
+               rc = -ENOMEM;
                goto destroy_wq;
+       }
 
        nfc_set_parent_dev(dev->nfc_dev, &interface->dev);
        nfc_set_drvdata(dev->nfc_dev, dev);
index 9c5f16e7baefa40970f7d3274aaff53fcd2f8c9a..0d17da7675b75b245a26d79498fc58418054f71d 100644 (file)
@@ -551,20 +551,25 @@ static int pn544_hci_complete_target_discovered(struct nfc_hci_dev *hdev,
                        return -EPROTO;
                }
 
-               r = nfc_hci_send_cmd(hdev, PN544_RF_READER_F_GATE,
-                                    PN544_RF_READER_CMD_ACTIVATE_NEXT,
-                                    uid_skb->data, uid_skb->len, NULL);
-               kfree_skb(uid_skb);
-
-               r = nfc_hci_send_cmd(hdev,
+               /* Type F NFC-DEP IDm has prefix 0x01FE */
+               if ((uid_skb->data[0] == 0x01) && (uid_skb->data[1] == 0xfe)) {
+                       kfree_skb(uid_skb);
+                       r = nfc_hci_send_cmd(hdev,
                                        PN544_RF_READER_NFCIP1_INITIATOR_GATE,
                                        PN544_HCI_CMD_CONTINUE_ACTIVATION,
                                        NULL, 0, NULL);
-               if (r < 0)
-                       return r;
+                       if (r < 0)
+                               return r;
 
-               target->hci_reader_gate = PN544_RF_READER_NFCIP1_INITIATOR_GATE;
-               target->supported_protocols = NFC_PROTO_NFC_DEP_MASK;
+                       target->supported_protocols = NFC_PROTO_NFC_DEP_MASK;
+                       target->hci_reader_gate =
+                               PN544_RF_READER_NFCIP1_INITIATOR_GATE;
+               } else {
+                       r = nfc_hci_send_cmd(hdev, PN544_RF_READER_F_GATE,
+                                            PN544_RF_READER_CMD_ACTIVATE_NEXT,
+                                            uid_skb->data, uid_skb->len, NULL);
+                       kfree_skb(uid_skb);
+               }
        } else if (target->supported_protocols & NFC_PROTO_ISO14443_MASK) {
                /*
                 * TODO: maybe other ISO 14443 require some kind of continue
@@ -706,12 +711,9 @@ static int pn544_hci_check_presence(struct nfc_hci_dev *hdev,
                 return nfc_hci_send_cmd(hdev, NFC_HCI_RF_READER_A_GATE,
                                     PN544_RF_READER_CMD_ACTIVATE_NEXT,
                                     target->nfcid1, target->nfcid1_len, NULL);
-       } else if (target->supported_protocols & NFC_PROTO_JEWEL_MASK) {
-               return nfc_hci_send_cmd(hdev, target->hci_reader_gate,
-                                       PN544_JEWEL_RAW_CMD, NULL, 0, NULL);
-       } else if (target->supported_protocols & NFC_PROTO_FELICA_MASK) {
-               return nfc_hci_send_cmd(hdev, PN544_RF_READER_F_GATE,
-                                       PN544_FELICA_RAW, NULL, 0, NULL);
+       } else if (target->supported_protocols & (NFC_PROTO_JEWEL_MASK |
+                                               NFC_PROTO_FELICA_MASK)) {
+               return -EOPNOTSUPP;
        } else if (target->supported_protocols & NFC_PROTO_NFC_DEP_MASK) {
                return nfc_hci_send_cmd(hdev, target->hci_reader_gate,
                                        PN544_HCI_CMD_ATTREQUEST,
@@ -801,7 +803,7 @@ int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
                    struct nfc_hci_dev **hdev)
 {
        struct pn544_hci_info *info;
-       u32 protocols, se;
+       u32 protocols;
        struct nfc_hci_init_data init_data;
        int r;
 
@@ -834,10 +836,8 @@ int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
                    NFC_PROTO_ISO14443_B_MASK |
                    NFC_PROTO_NFC_DEP_MASK;
 
-       se = NFC_SE_UICC | NFC_SE_EMBEDDED;
-
        info->hdev = nfc_hci_allocate_device(&pn544_hci_ops, &init_data, 0,
-                                            protocols, se, llc_name,
+                                            protocols, llc_name,
                                             phy_headroom + PN544_CMDS_HEADROOM,
                                             phy_tailroom, phy_payload);
        if (!info->hdev) {
index a3a851e49321542ac84659f4f3315219163430ae..18c0d8d1ddf779dbba8f09d7b898b7b687bd0a74 100644 (file)
@@ -68,12 +68,6 @@ MODULE_LICENSE("Dual MPL/GPL");
 
 #if !defined(CONFIG_PCMCIA_SLOT_A) && !defined(CONFIG_PCMCIA_SLOT_B)
 
-/* The RPX series use SLOT_B */
-#if defined(CONFIG_RPXCLASSIC) || defined(CONFIG_RPXLITE)
-#define CONFIG_PCMCIA_SLOT_B
-#define CONFIG_BD_IS_MHZ
-#endif
-
 /* The ADS board use SLOT_A */
 #ifdef CONFIG_ADS
 #define CONFIG_PCMCIA_SLOT_A
@@ -253,81 +247,6 @@ static irqreturn_t m8xx_interrupt(int irq, void *dev);
 
 #define PCMCIA_BMT_LIMIT (15*4)        /* Bus Monitor Timeout value */
 
-/* ------------------------------------------------------------------------- */
-/* board specific stuff:                                                     */
-/* voltage_set(), hardware_enable() and hardware_disable()                   */
-/* ------------------------------------------------------------------------- */
-/* RPX Boards from Embedded Planet                                           */
-
-#if defined(CONFIG_RPXCLASSIC) || defined(CONFIG_RPXLITE)
-
-/* The RPX boards seems to have it's bus monitor timeout set to 6*8 clocks.
- * SYPCR is write once only, therefore must the slowest memory be faster
- * than the bus monitor or we will get a machine check due to the bus timeout.
- */
-
-#define PCMCIA_BOARD_MSG "RPX CLASSIC or RPX LITE"
-
-#undef PCMCIA_BMT_LIMIT
-#define PCMCIA_BMT_LIMIT (6*8)
-
-static int voltage_set(int slot, int vcc, int vpp)
-{
-       u32 reg = 0;
-
-       switch (vcc) {
-       case 0:
-               break;
-       case 33:
-               reg |= BCSR1_PCVCTL4;
-               break;
-       case 50:
-               reg |= BCSR1_PCVCTL5;
-               break;
-       default:
-               return 1;
-       }
-
-       switch (vpp) {
-       case 0:
-               break;
-       case 33:
-       case 50:
-               if (vcc == vpp)
-                       reg |= BCSR1_PCVCTL6;
-               else
-                       return 1;
-               break;
-       case 120:
-               reg |= BCSR1_PCVCTL7;
-       default:
-               return 1;
-       }
-
-       if (!((vcc == 50) || (vcc == 0)))
-               return 1;
-
-       /* first, turn off all power */
-
-       out_be32(((u32 *) RPX_CSR_ADDR),
-                in_be32(((u32 *) RPX_CSR_ADDR)) & ~(BCSR1_PCVCTL4 |
-                                                    BCSR1_PCVCTL5 |
-                                                    BCSR1_PCVCTL6 |
-                                                    BCSR1_PCVCTL7));
-
-       /* enable new powersettings */
-
-       out_be32(((u32 *) RPX_CSR_ADDR), in_be32(((u32 *) RPX_CSR_ADDR)) | reg);
-
-       return 0;
-}
-
-#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V
-#define hardware_enable(_slot_)        /* No hardware to enable */
-#define hardware_disable(_slot_)       /* No hardware to disable */
-
-#endif                         /* CONFIG_RPXCLASSIC */
-
 /* FADS Boards from Motorola                                               */
 
 #if defined(CONFIG_FADS)
@@ -419,65 +338,6 @@ static inline int voltage_set(int slot, int vcc, int vpp)
 
 #endif
 
-/* ------------------------------------------------------------------------- */
-/* Motorola MBX860                                                           */
-
-#if defined(CONFIG_MBX)
-
-#define PCMCIA_BOARD_MSG "MBX"
-
-static int voltage_set(int slot, int vcc, int vpp)
-{
-       u8 reg = 0;
-
-       switch (vcc) {
-       case 0:
-               break;
-       case 33:
-               reg |= CSR2_VCC_33;
-               break;
-       case 50:
-               reg |= CSR2_VCC_50;
-               break;
-       default:
-               return 1;
-       }
-
-       switch (vpp) {
-       case 0:
-               break;
-       case 33:
-       case 50:
-               if (vcc == vpp)
-                       reg |= CSR2_VPP_VCC;
-               else
-                       return 1;
-               break;
-       case 120:
-               if ((vcc == 33) || (vcc == 50))
-                       reg |= CSR2_VPP_12;
-               else
-                       return 1;
-       default:
-               return 1;
-       }
-
-       /* first, turn off all power */
-       out_8((u8 *) MBX_CSR2_ADDR,
-             in_8((u8 *) MBX_CSR2_ADDR) & ~(CSR2_VCC_MASK | CSR2_VPP_MASK));
-
-       /* enable new powersettings */
-       out_8((u8 *) MBX_CSR2_ADDR, in_8((u8 *) MBX_CSR2_ADDR) | reg);
-
-       return 0;
-}
-
-#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V
-#define hardware_enable(_slot_)        /* No hardware to enable */
-#define hardware_disable(_slot_)       /* No hardware to disable */
-
-#endif                         /* CONFIG_MBX */
-
 #if defined(CONFIG_PRxK)
 #include <asm/cpld.h>
 extern volatile fpga_pc_regs *fpga_pc;
index 3338437b559b7e696e305b08e54ab1d874e67b4d..85772616efbf02b82c61af7e0846baed894c273e 100644 (file)
@@ -781,4 +781,12 @@ config APPLE_GMUX
          graphics as well as the backlight. Currently only backlight
          control is supported by the driver.
 
+config PVPANIC
+       tristate "pvpanic device support"
+       depends on ACPI
+       ---help---
+         This driver provides support for the pvpanic device.  pvpanic is
+         a paravirtualized device provided by QEMU; it lets a virtual machine
+         (guest) communicate panic events to the host.
+
 endif # X86_PLATFORM_DEVICES
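As the PVPANIC help text above explains, the device exists so a panicking guest kernel can signal the host. A minimal sketch of that guest-side path follows, assuming only the standard panic-notifier API; the 0x505 port value is merely QEMU's conventional default, hard-coded here for illustration, whereas the real driver added later in this diff discovers the port from the device's ACPI _CRS resources.

/*
 * Sketch only (not part of this commit): notify the host on panic by
 * writing the PANICKED bit to the pvpanic I/O port.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/io.h>

#define PVPANIC_PORT_GUESS	0x505		/* assumption: QEMU's default ioport */
#define PVPANIC_PANICKED	(1 << 0)

static int pvpanic_sketch_notify(struct notifier_block *nb,
				 unsigned long code, void *unused)
{
	/* Tell the host this guest has panicked. */
	outb(PVPANIC_PANICKED, PVPANIC_PORT_GUESS);
	return NOTIFY_DONE;
}

static struct notifier_block pvpanic_sketch_nb = {
	.notifier_call = pvpanic_sketch_notify,
};

static int __init pvpanic_sketch_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &pvpanic_sketch_nb);
	return 0;
}

static void __exit pvpanic_sketch_exit(void)
{
	atomic_notifier_chain_unregister(&panic_notifier_list,
					 &pvpanic_sketch_nb);
}

module_init(pvpanic_sketch_init);
module_exit(pvpanic_sketch_exit);
MODULE_LICENSE("GPL");

On the host side, QEMU surfaces that port write to management software as a guest-panicked event.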
index ace2b38942fe27f9cc02697f0f9543ca5eeff595..ef0ec746f78c49e5f6bf01fe28099ebbbee08318 100644 (file)
@@ -51,3 +51,5 @@ obj-$(CONFIG_INTEL_OAKTRAIL)  += intel_oaktrail.o
 obj-$(CONFIG_SAMSUNG_Q10)      += samsung-q10.o
 obj-$(CONFIG_APPLE_GMUX)       += apple-gmux.o
 obj-$(CONFIG_CHROMEOS_LAPTOP)  += chromeos_laptop.o
+
+obj-$(CONFIG_PVPANIC)           += pvpanic.o
index 210b5b872125e8aef1d751e56968f953686c4904..8fcb41e18b9cd6ad508001d33d86665f4955816c 100644 (file)
@@ -171,6 +171,15 @@ static struct dmi_system_id asus_quirks[] = {
                },
                .driver_data = &quirk_asus_x401u,
        },
+       {
+               .callback = dmi_matched,
+               .ident = "ASUSTeK COMPUTER INC. X75A",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "X75A"),
+               },
+               .driver_data = &quirk_asus_x401u,
+       },
        {},
 };
 
index fa3ee6209572976b71d623585ecfb026842d5645..1134119521ac2e4da9708703d8b4252ac807ce15 100644 (file)
@@ -284,6 +284,7 @@ static void __init parse_da_table(const struct dmi_header *dm)
 {
        /* Final token is a terminator, so we don't want to copy it */
        int tokens = (dm->length-11)/sizeof(struct calling_interface_token)-1;
+       struct calling_interface_token *new_da_tokens;
        struct calling_interface_structure *table =
                container_of(dm, struct calling_interface_structure, header);
 
@@ -296,12 +297,13 @@ static void __init parse_da_table(const struct dmi_header *dm)
        da_command_address = table->cmdIOAddress;
        da_command_code = table->cmdIOCode;
 
-       da_tokens = krealloc(da_tokens, (da_num_tokens + tokens) *
-                            sizeof(struct calling_interface_token),
-                            GFP_KERNEL);
+       new_da_tokens = krealloc(da_tokens, (da_num_tokens + tokens) *
+                                sizeof(struct calling_interface_token),
+                                GFP_KERNEL);
 
-       if (!da_tokens)
+       if (!new_da_tokens)
                return;
+       da_tokens = new_da_tokens;
 
        memcpy(da_tokens+da_num_tokens, table->tokens,
               sizeof(struct calling_interface_token) * tokens);
index 3f945457f71c218116e7f8eb9d4a7400fcdc8b62..bcf8cc6b5537cca1a1c3ad016d700ca3f30104a4 100644 (file)
@@ -34,6 +34,14 @@ MODULE_LICENSE("GPL");
 #define EVENT_GUID1 "284A0E6B-380E-472A-921F-E52786257FB4"
 #define EVENT_GUID2 "02314822-307C-4F66-BF0E-48AEAEB26CC8"
 
+struct dell_wmi_event {
+       u16     length;
+       /* 0x000: A hot key was pressed or an event occurred
+        * 0x00F: A sequence of hot keys was pressed */
+       u16     type;
+       u16     event[];
+};
+
 static const char *dell_wmi_aio_guids[] = {
        EVENT_GUID1,
        EVENT_GUID2,
@@ -46,15 +54,41 @@ MODULE_ALIAS("wmi:"EVENT_GUID2);
 static const struct key_entry dell_wmi_aio_keymap[] = {
        { KE_KEY, 0xc0, { KEY_VOLUMEUP } },
        { KE_KEY, 0xc1, { KEY_VOLUMEDOWN } },
+       { KE_KEY, 0xe030, { KEY_VOLUMEUP } },
+       { KE_KEY, 0xe02e, { KEY_VOLUMEDOWN } },
+       { KE_KEY, 0xe020, { KEY_MUTE } },
+       { KE_KEY, 0xe027, { KEY_DISPLAYTOGGLE } },
+       { KE_KEY, 0xe006, { KEY_BRIGHTNESSUP } },
+       { KE_KEY, 0xe005, { KEY_BRIGHTNESSDOWN } },
+       { KE_KEY, 0xe00b, { KEY_SWITCHVIDEOMODE } },
        { KE_END, 0 }
 };
 
 static struct input_dev *dell_wmi_aio_input_dev;
 
+/*
+ * The new WMI event data format follows the dell_wmi_event structure,
+ * so we check whether the buffer matches that format.
+ */
+static bool dell_wmi_aio_event_check(u8 *buffer, int length)
+{
+       struct dell_wmi_event *event = (struct dell_wmi_event *)buffer;
+
+       if (event == NULL || length < 6)
+               return false;
+
+       if ((event->type == 0 || event->type == 0xf) &&
+                       event->length >= 2)
+               return true;
+
+       return false;
+}
+
 static void dell_wmi_aio_notify(u32 value, void *context)
 {
        struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *obj;
+       struct dell_wmi_event *event;
        acpi_status status;
 
        status = wmi_get_event_data(value, &response);
@@ -65,7 +99,7 @@ static void dell_wmi_aio_notify(u32 value, void *context)
 
        obj = (union acpi_object *)response.pointer;
        if (obj) {
-               unsigned int scancode;
+               unsigned int scancode = 0;
 
                switch (obj->type) {
                case ACPI_TYPE_INTEGER:
@@ -75,13 +109,22 @@ static void dell_wmi_aio_notify(u32 value, void *context)
                                scancode, 1, true);
                        break;
                case ACPI_TYPE_BUFFER:
-                       /* Broken machines return the scancode in a buffer */
-                       if (obj->buffer.pointer && obj->buffer.length > 0) {
-                               scancode = obj->buffer.pointer[0];
+                       if (dell_wmi_aio_event_check(obj->buffer.pointer,
+                                               obj->buffer.length)) {
+                               event = (struct dell_wmi_event *)
+                                       obj->buffer.pointer;
+                               scancode = event->event[0];
+                       } else {
+                               /* Broken machines return the scancode in a
+                                  buffer */
+                               if (obj->buffer.pointer &&
+                                               obj->buffer.length > 0)
+                                       scancode = obj->buffer.pointer[0];
+                       }
+                       if (scancode)
                                sparse_keymap_report_event(
                                        dell_wmi_aio_input_dev,
                                        scancode, 1, true);
-                       }
                        break;
                }
        }
index 1a779bbfb87d2b8de61e964734981f4f9a6c891e..8df0c5a21be27d7a2044b06b8ee21bc8fab90359 100644 (file)
@@ -71,6 +71,14 @@ enum hp_wmi_event_ids {
        HPWMI_WIRELESS = 5,
        HPWMI_CPU_BATTERY_THROTTLE = 6,
        HPWMI_LOCK_SWITCH = 7,
+       HPWMI_LID_SWITCH = 8,
+       HPWMI_SCREEN_ROTATION = 9,
+       HPWMI_COOLSENSE_SYSTEM_MOBILE = 0x0A,
+       HPWMI_COOLSENSE_SYSTEM_HOT = 0x0B,
+       HPWMI_PROXIMITY_SENSOR = 0x0C,
+       HPWMI_BACKLIT_KB_BRIGHTNESS = 0x0D,
+       HPWMI_PEAKSHIFT_PERIOD = 0x0F,
+       HPWMI_BATTERY_CHARGE_PERIOD = 0x10,
 };
 
 struct bios_args {
@@ -536,6 +544,22 @@ static void hp_wmi_notify(u32 value, void *context)
                break;
        case HPWMI_LOCK_SWITCH:
                break;
+       case HPWMI_LID_SWITCH:
+               break;
+       case HPWMI_SCREEN_ROTATION:
+               break;
+       case HPWMI_COOLSENSE_SYSTEM_MOBILE:
+               break;
+       case HPWMI_COOLSENSE_SYSTEM_HOT:
+               break;
+       case HPWMI_PROXIMITY_SENSOR:
+               break;
+       case HPWMI_BACKLIT_KB_BRIGHTNESS:
+               break;
+       case HPWMI_PEAKSHIFT_PERIOD:
+               break;
+       case HPWMI_BATTERY_CHARGE_PERIOD:
+               break;
        default:
                pr_info("Unknown event_id - %d - 0x%x\n", event_id, event_data);
                break;
index e64a7a870d42146c910ffb39b955c29a11a88080..a8e43cf70fac96309d7b689265b7c2b6ba2a19db 100644 (file)
@@ -362,7 +362,8 @@ static int lis3lv02d_suspend(struct device *dev)
 
 static int lis3lv02d_resume(struct device *dev)
 {
-       return lis3lv02d_poweron(&lis3_dev);
+       lis3lv02d_poweron(&lis3_dev);
+       return 0;
 }
 
 static SIMPLE_DEV_PM_OPS(hp_accel_pm, lis3lv02d_suspend, lis3lv02d_resume);
index 17f00b8dc5cbcb841540f07c7330775bcb5393df..89c4519d48ac80d2a54f42ceabf7a48b792e871c 100644 (file)
@@ -640,7 +640,8 @@ static void ideapad_check_special_buttons(struct ideapad_private *priv)
        for (bit = 0; bit < 16; bit++) {
                if (test_bit(bit, &value)) {
                        switch (bit) {
-                       case 6:
+                       case 0: /* Z580 */
+                       case 6: /* Z570 */
                                /* Thermal Management button */
                                ideapad_input_report(priv, 65);
                                break;
@@ -648,6 +649,9 @@ static void ideapad_check_special_buttons(struct ideapad_private *priv)
                                /* OneKey Theater button */
                                ideapad_input_report(priv, 64);
                                break;
+                       default:
+                               pr_info("Unknown special button: %lu\n", bit);
+                               break;
                        }
                }
        }
diff --git a/drivers/platform/x86/pvpanic.c b/drivers/platform/x86/pvpanic.c
new file mode 100644 (file)
index 0000000..47ae0c4
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ *  pvpanic.c - pvpanic Device Support
+ *
+ *  Copyright (C) 2013 Fujitsu.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+MODULE_AUTHOR("Hu Tao <hutao@cn.fujitsu.com>");
+MODULE_DESCRIPTION("pvpanic device driver");
+MODULE_LICENSE("GPL");
+
+static int pvpanic_add(struct acpi_device *device);
+static int pvpanic_remove(struct acpi_device *device);
+
+static const struct acpi_device_id pvpanic_device_ids[] = {
+       { "QEMU0001", 0 },
+       { "", 0 },
+};
+MODULE_DEVICE_TABLE(acpi, pvpanic_device_ids);
+
+#define PVPANIC_PANICKED       (1 << 0)
+
+static u16 port;
+
+static struct acpi_driver pvpanic_driver = {
+       .name =         "pvpanic",
+       .class =        "QEMU",
+       .ids =          pvpanic_device_ids,
+       .ops =          {
+                               .add =          pvpanic_add,
+                               .remove =       pvpanic_remove,
+                       },
+       .owner =        THIS_MODULE,
+};
+
+static void
+pvpanic_send_event(unsigned int event)
+{
+       outb(event, port);
+}
+
+static int
+pvpanic_panic_notify(struct notifier_block *nb, unsigned long code,
+                    void *unused)
+{
+       pvpanic_send_event(PVPANIC_PANICKED);
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block pvpanic_panic_nb = {
+       .notifier_call = pvpanic_panic_notify,
+};
+
+
+static acpi_status
+pvpanic_walk_resources(struct acpi_resource *res, void *context)
+{
+       switch (res->type) {
+       case ACPI_RESOURCE_TYPE_END_TAG:
+               return AE_OK;
+
+       case ACPI_RESOURCE_TYPE_IO:
+               port = res->data.io.minimum;
+               return AE_OK;
+
+       default:
+               return AE_ERROR;
+       }
+}
+
+static int pvpanic_add(struct acpi_device *device)
+{
+       acpi_status status;
+       u64 ret;
+
+       status = acpi_evaluate_integer(device->handle, "_STA", NULL,
+                                      &ret);
+
+       if (ACPI_FAILURE(status) || (ret & 0x0B) != 0x0B)
+               return -ENODEV;
+
+       acpi_walk_resources(device->handle, METHOD_NAME__CRS,
+                           pvpanic_walk_resources, NULL);
+
+       if (!port)
+               return -ENODEV;
+
+       atomic_notifier_chain_register(&panic_notifier_list,
+                                      &pvpanic_panic_nb);
+
+       return 0;
+}
+
+static int pvpanic_remove(struct acpi_device *device)
+{
+
+       atomic_notifier_chain_unregister(&panic_notifier_list,
+                                        &pvpanic_panic_nb);
+       return 0;
+}
+
+module_acpi_driver(pvpanic_driver);
index 5f770059fd4d3aa3115140fd5afcc7a7832eca53..1a90b62a71c66400268cad23c7d6690b1eec30d0 100644 (file)
@@ -176,10 +176,7 @@ static int __init samsungq10_init(void)
                                                   samsungq10_probe,
                                                   NULL, 0, NULL, 0);
 
-       if (IS_ERR(samsungq10_device))
-               return PTR_ERR(samsungq10_device);
-
-       return 0;
+       return PTR_RET(samsungq10_device);
 }
 
 static void __exit samsungq10_exit(void)
index d544e3aaf76141754b622aada806b04050fe2886..2ac045f27f10112aa467b867a9b97a9a1790c189 100644 (file)
@@ -1255,6 +1255,11 @@ static void sony_nc_notify(struct acpi_device *device, u32 event)
                        real_ev = __sony_nc_gfx_switch_status_get();
                        break;
 
+               case 0x015B:
+                       /* Hybrid GFX switching SVS151290S */
+                       ev_type = GFX_SWITCH;
+                       real_ev = __sony_nc_gfx_switch_status_get();
+                       break;
                default:
                        dprintk("Unknown event 0x%x for handle 0x%x\n",
                                        event, handle);
@@ -1353,6 +1358,7 @@ static void sony_nc_function_setup(struct acpi_device *device,
                        break;
                case 0x0128:
                case 0x0146:
+               case 0x015B:
                        result = sony_nc_gfx_switch_setup(pf_device, handle);
                        if (result)
                                pr_err("couldn't set up GFX Switch status (%d)\n",
@@ -1375,6 +1381,7 @@ static void sony_nc_function_setup(struct acpi_device *device,
                case 0x0143:
                case 0x014b:
                case 0x014c:
+               case 0x0163:
                        result = sony_nc_kbd_backlight_setup(pf_device, handle);
                        if (result)
                                pr_err("couldn't set up keyboard backlight function (%d)\n",
@@ -1426,6 +1433,7 @@ static void sony_nc_function_cleanup(struct platform_device *pd)
                        break;
                case 0x0128:
                case 0x0146:
+               case 0x015B:
                        sony_nc_gfx_switch_cleanup(pd);
                        break;
                case 0x0131:
@@ -1439,6 +1447,7 @@ static void sony_nc_function_cleanup(struct platform_device *pd)
                case 0x0143:
                case 0x014b:
                case 0x014c:
+               case 0x0163:
                        sony_nc_kbd_backlight_cleanup(pd);
                        break;
                default:
@@ -1485,6 +1494,7 @@ static void sony_nc_function_resume(void)
                case 0x0143:
                case 0x014b:
                case 0x014c:
+               case 0x0163:
                        sony_nc_kbd_backlight_resume();
                        break;
                default:
@@ -2390,7 +2400,9 @@ static int __sony_nc_gfx_switch_status_get(void)
 {
        unsigned int result;
 
-       if (sony_call_snc_handle(gfxs_ctl->handle, 0x0100, &result))
+       if (sony_call_snc_handle(gfxs_ctl->handle,
+                               gfxs_ctl->handle == 0x015B ? 0x0000 : 0x0100,
+                               &result))
                return -EIO;
 
        switch (gfxs_ctl->handle) {
@@ -2400,6 +2412,12 @@ static int __sony_nc_gfx_switch_status_get(void)
                 */
                return result & 0x1 ? SPEED : STAMINA;
                break;
+       case 0x015B:
+               /* 0: discrete GFX (speed)
+                * 1: integrated GFX (stamina)
+                */
+               return result & 0x1 ? STAMINA : SPEED;
+               break;
        case 0x0128:
                /* it's a more elaborated bitmask, for now:
                 * 2: integrated GFX (stamina)
index db95c547c09d6781d68dca561abe230c5d0f57ce..86af29f53bbebefec7d58ca0c595c3852e6fdf79 100644 (file)
@@ -1353,6 +1353,8 @@ config SCSI_LPFC
        tristate "Emulex LightPulse Fibre Channel Support"
        depends on PCI && SCSI
        select SCSI_FC_ATTRS
+       select GENERIC_CSUM
+       select CRC_T10DIF
        help
           This lpfc driver supports the Emulex LightPulse
           Family of Fibre Channel PCI host adapters.
index 64136c56e7065052bf24d0aa53e500f69f3957d0..33072388ea166a0cce3a2019782b4f64e5a91e33 100644 (file)
@@ -84,7 +84,7 @@ static void asd_set_ddb_type(struct domain_device *dev)
        struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
        int ddb = (int) (unsigned long) dev->lldd_dev;
 
-       if (dev->dev_type == SATA_PM_PORT)
+       if (dev->dev_type == SAS_SATA_PM_PORT)
                asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_PM_PORT);
        else if (dev->tproto)
                asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_TARGET);
@@ -116,7 +116,7 @@ void asd_set_dmamode(struct domain_device *dev)
        int ddb = (int) (unsigned long) dev->lldd_dev;
        u32 qdepth = 0;
 
-       if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM_PORT) {
+       if (dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM_PORT) {
                if (ata_id_has_ncq(ata_dev->id))
                        qdepth = ata_id_queue_depth(ata_dev->id);
                asd_ddbsite_write_dword(asd_ha, ddb, SATA_TAG_ALLOC_MASK,
@@ -140,8 +140,8 @@ static int asd_init_sata(struct domain_device *dev)
        int ddb = (int) (unsigned long) dev->lldd_dev;
 
        asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF);
-       if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM ||
-           dev->dev_type == SATA_PM_PORT) {
+       if (dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM ||
+           dev->dev_type == SAS_SATA_PM_PORT) {
                struct dev_to_host_fis *fis = (struct dev_to_host_fis *)
                        dev->frame_rcvd;
                asd_ddbsite_write_byte(asd_ha, ddb, SATA_STATUS, fis->status);
@@ -174,7 +174,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
        asd_ddbsite_write_byte(asd_ha, ddb, CONN_MASK, dev->port->phy_mask);
        if (dev->port->oob_mode != SATA_OOB_MODE) {
                flags |= OPEN_REQUIRED;
-               if ((dev->dev_type == SATA_DEV) ||
+               if ((dev->dev_type == SAS_SATA_DEV) ||
                    (dev->tproto & SAS_PROTOCOL_STP)) {
                        struct smp_resp *rps_resp = &dev->sata_dev.rps_resp;
                        if (rps_resp->frame_type == SMP_RESPONSE &&
@@ -188,8 +188,8 @@ static int asd_init_target_ddb(struct domain_device *dev)
                } else {
                        flags |= CONCURRENT_CONN_SUPP;
                        if (!dev->parent &&
-                           (dev->dev_type == EDGE_DEV ||
-                            dev->dev_type == FANOUT_DEV))
+                           (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+                            dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE))
                                asd_ddbsite_write_byte(asd_ha, ddb, MAX_CCONN,
                                                       4);
                        else
@@ -198,7 +198,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
                        asd_ddbsite_write_byte(asd_ha, ddb, NUM_CTX, 1);
                }
        }
-       if (dev->dev_type == SATA_PM)
+       if (dev->dev_type == SAS_SATA_PM)
                flags |= SATA_MULTIPORT;
        asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS, flags);
 
@@ -211,7 +211,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
        asd_ddbsite_write_word(asd_ha, ddb, SEND_QUEUE_TAIL, 0xFFFF);
        asd_ddbsite_write_word(asd_ha, ddb, SISTER_DDB, 0xFFFF);
 
-       if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
+       if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
                i = asd_init_sata(dev);
                if (i < 0) {
                        asd_free_ddb(asd_ha, ddb);
@@ -219,7 +219,7 @@ static int asd_init_target_ddb(struct domain_device *dev)
                }
        }
 
-       if (dev->dev_type == SAS_END_DEV) {
+       if (dev->dev_type == SAS_END_DEVICE) {
                struct sas_end_device *rdev = rphy_to_end_device(dev->rphy);
                if (rdev->I_T_nexus_loss_timeout > 0)
                        asd_ddbsite_write_word(asd_ha, ddb, ITNL_TIMEOUT,
@@ -328,10 +328,10 @@ int asd_dev_found(struct domain_device *dev)
 
        spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags);
        switch (dev->dev_type) {
-       case SATA_PM:
+       case SAS_SATA_PM:
                res = asd_init_sata_pm_ddb(dev);
                break;
-       case SATA_PM_PORT:
+       case SAS_SATA_PM_PORT:
                res = asd_init_sata_pm_port_ddb(dev);
                break;
        default:
index 81b736c76fffab7bf40be168ff1ae3fb2388faad..4df867e07b2022df773ce0f7f0115e2b7bf4a5e7 100644 (file)
@@ -74,7 +74,7 @@ static void asd_init_phy_identify(struct asd_phy *phy)
 
        memset(phy->identify_frame, 0, sizeof(*phy->identify_frame));
 
-       phy->identify_frame->dev_type = SAS_END_DEV;
+       phy->identify_frame->dev_type = SAS_END_DEVICE;
        if (phy->sas_phy.role & PHY_ROLE_INITIATOR)
                phy->identify_frame->initiator_bits = phy->sas_phy.iproto;
        if (phy->sas_phy.role & PHY_ROLE_TARGET)
index cf9040933da6497ca500454274fe163c50ec324d..d4c35df3d4ae600f9e5d0d7a02ed547f4c8ff905 100644 (file)
@@ -184,7 +184,7 @@ int asd_I_T_nexus_reset(struct domain_device *dev)
        struct sas_phy *phy = sas_get_local_phy(dev);
        /* Standard mandates link reset for ATA  (type 0) and
         * hard reset for SSP (type 1) */
-       int reset_type = (dev->dev_type == SATA_DEV ||
+       int reset_type = (dev->dev_type == SAS_SATA_DEV ||
                          (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
 
        asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE);
index f1733dfa3ae24489b44b3f6bee92568589c7ade1..777e7c0bbb4b327d6624a2d771a78fa5033d432c 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
index 5c87768c109c0fa9a8fc0dda6f8bcb6c357ca083..e66aa7c11a8a21e041f8956c6ed0f758401c8420 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -155,6 +155,7 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
        uint16_t status = 0, addl_status = 0, wrb_num = 0;
        struct be_mcc_wrb *temp_wrb;
        struct be_cmd_req_hdr *ioctl_hdr;
+       struct be_cmd_resp_hdr *ioctl_resp_hdr;
        struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
 
        if (beiscsi_error(phba))
@@ -204,6 +205,12 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
                            ioctl_hdr->subsystem,
                            ioctl_hdr->opcode,
                            status, addl_status);
+
+               if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
+                       ioctl_resp_hdr = (struct be_cmd_resp_hdr *) ioctl_hdr;
+                       if (ioctl_resp_hdr->response_length)
+                               goto release_mcc_tag;
+               }
                rc = -EAGAIN;
        }
 
@@ -267,6 +274,7 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
        struct be_cmd_req_hdr *hdr = embedded_payload(wrb);
+       struct be_cmd_resp_hdr *resp_hdr;
 
        be_dws_le_to_cpu(compl, 4);
 
@@ -284,6 +292,11 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
                            hdr->subsystem, hdr->opcode,
                            compl_status, extd_status);
 
+               if (compl_status == MCC_STATUS_INSUFFICIENT_BUFFER) {
+                       resp_hdr = (struct be_cmd_resp_hdr *) hdr;
+                       if (resp_hdr->response_length)
+                               return 0;
+               }
                return -EBUSY;
        }
        return 0;
@@ -335,30 +348,26 @@ static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
 void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
                struct be_async_event_link_state *evt)
 {
-       switch (evt->port_link_status) {
-       case ASYNC_EVENT_LINK_DOWN:
+       if ((evt->port_link_status == ASYNC_EVENT_LINK_DOWN) ||
+           ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
+            (evt->port_fault != BEISCSI_PHY_LINK_FAULT_NONE))) {
+               phba->state = BE_ADAPTER_LINK_DOWN;
+
                beiscsi_log(phba, KERN_ERR,
                            BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
-                           "BC_%d : Link Down on Physical Port %d\n",
+                           "BC_%d : Link Down on Port %d\n",
                            evt->physical_port);
 
-               phba->state |= BE_ADAPTER_LINK_DOWN;
                iscsi_host_for_each_session(phba->shost,
                                            be2iscsi_fail_session);
-               break;
-       case ASYNC_EVENT_LINK_UP:
+       } else if ((evt->port_link_status & ASYNC_EVENT_LINK_UP) ||
+                   ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
+                    (evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) {
                phba->state = BE_ADAPTER_UP;
+
                beiscsi_log(phba, KERN_ERR,
                            BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
-                           "BC_%d : Link UP on Physical Port %d\n",
-                           evt->physical_port);
-               break;
-       default:
-               beiscsi_log(phba, KERN_ERR,
-                           BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
-                           "BC_%d : Unexpected Async Notification %d on"
-                           "Physical Port %d\n",
-                           evt->port_link_status,
+                           "BC_%d : Link UP on Port %d\n",
                            evt->physical_port);
        }
 }
@@ -479,7 +488,7 @@ static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
 {
        void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
        struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
-       int wait = 0;
+       uint32_t wait = 0;
        u32 ready;
 
        do {
@@ -527,6 +536,10 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
        struct be_mcc_compl *compl = &mbox->compl;
        struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
 
+       status = be_mbox_db_ready_wait(ctrl);
+       if (status)
+               return status;
+
        val &= ~MPU_MAILBOX_DB_RDY_MASK;
        val |= MPU_MAILBOX_DB_HI_MASK;
        val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
@@ -580,6 +593,10 @@ static int be_mbox_notify_wait(struct beiscsi_hba *phba)
        struct be_mcc_compl *compl = &mbox->compl;
        struct be_ctrl_info *ctrl = &phba->ctrl;
 
+       status = be_mbox_db_ready_wait(ctrl);
+       if (status)
+               return status;
+
        val |= MPU_MAILBOX_DB_HI_MASK;
        /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
        val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
@@ -732,6 +749,16 @@ int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
        return status;
 }
 
+/**
+ * be_cmd_fw_initialize()- Initialize FW
+ * @ctrl: Pointer to function control structure
+ *
+ * Send FW initialize pattern for the function.
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero value
+ **/
 int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
 {
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
@@ -762,6 +789,47 @@ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
        return status;
 }
 
+/**
+ * be_cmd_fw_uninit()- Uninitialize FW
+ * @ctrl: Pointer to function control structure
+ *
+ * Send FW uninitialize pattern for the function
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero value
+ **/
+int be_cmd_fw_uninit(struct be_ctrl_info *ctrl)
+{
+       struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+       struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
+       int status;
+       u8 *endian_check;
+
+       spin_lock(&ctrl->mbox_lock);
+       memset(wrb, 0, sizeof(*wrb));
+
+       endian_check = (u8 *) wrb;
+       *endian_check++ = 0xFF;
+       *endian_check++ = 0xAA;
+       *endian_check++ = 0xBB;
+       *endian_check++ = 0xFF;
+       *endian_check++ = 0xFF;
+       *endian_check++ = 0xCC;
+       *endian_check++ = 0xDD;
+       *endian_check = 0xFF;
+
+       be_dws_cpu_to_le(wrb, sizeof(*wrb));
+
+       status = be_mbox_notify(ctrl);
+       if (status)
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BC_%d : be_cmd_fw_uninit Failed\n");
+
+       spin_unlock(&ctrl->mbox_lock);
+       return status;
+}
+
 int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
                          struct be_queue_info *cq, struct be_queue_info *eq,
                          bool sol_evts, bool no_delay, int coalesce_wm)
@@ -783,20 +851,7 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
                        OPCODE_COMMON_CQ_CREATE, sizeof(*req));
 
        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
-       if (chip_skh_r(ctrl->pdev)) {
-               req->hdr.version = MBX_CMD_VER2;
-               req->page_size = 1;
-               AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
-                             ctxt, coalesce_wm);
-               AMAP_SET_BITS(struct amap_cq_context_v2, nodelay,
-                             ctxt, no_delay);
-               AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
-                             __ilog2_u32(cq->len / 256));
-               AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
-               AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
-               AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
-               AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1);
-       } else {
+       if (is_chip_be2_be3r(phba)) {
                AMAP_SET_BITS(struct amap_cq_context, coalescwm,
                              ctxt, coalesce_wm);
                AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
@@ -809,6 +864,19 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
                AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
                AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
                              PCI_FUNC(ctrl->pdev->devfn));
+       } else {
+               req->hdr.version = MBX_CMD_VER2;
+               req->page_size = 1;
+               AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
+                             ctxt, coalesce_wm);
+               AMAP_SET_BITS(struct amap_cq_context_v2, nodelay,
+                             ctxt, no_delay);
+               AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
+                             __ilog2_u32(cq->len / 256));
+               AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
+               AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
+               AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
+               AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1);
        }
 
        be_dws_cpu_to_le(ctxt, sizeof(req->context));
@@ -949,6 +1017,7 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_defq_create_req *req = embedded_payload(wrb);
        struct be_dma_mem *q_mem = &dq->dma_mem;
+       struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
        void *ctxt = &req->context;
        int status;
 
@@ -961,17 +1030,36 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
                           OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));
 
        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
-       AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid, ctxt, 0);
-       AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid_valid, ctxt,
-                     1);
-       AMAP_SET_BITS(struct amap_be_default_pdu_context, pci_func_id, ctxt,
-                     PCI_FUNC(ctrl->pdev->devfn));
-       AMAP_SET_BITS(struct amap_be_default_pdu_context, ring_size, ctxt,
-                     be_encoded_q_len(length / sizeof(struct phys_addr)));
-       AMAP_SET_BITS(struct amap_be_default_pdu_context, default_buffer_size,
-                     ctxt, entry_size);
-       AMAP_SET_BITS(struct amap_be_default_pdu_context, cq_id_recv, ctxt,
-                     cq->id);
+
+       if (is_chip_be2_be3r(phba)) {
+               AMAP_SET_BITS(struct amap_be_default_pdu_context,
+                             rx_pdid, ctxt, 0);
+               AMAP_SET_BITS(struct amap_be_default_pdu_context,
+                             rx_pdid_valid, ctxt, 1);
+               AMAP_SET_BITS(struct amap_be_default_pdu_context,
+                             pci_func_id, ctxt, PCI_FUNC(ctrl->pdev->devfn));
+               AMAP_SET_BITS(struct amap_be_default_pdu_context,
+                             ring_size, ctxt,
+                             be_encoded_q_len(length /
+                             sizeof(struct phys_addr)));
+               AMAP_SET_BITS(struct amap_be_default_pdu_context,
+                             default_buffer_size, ctxt, entry_size);
+               AMAP_SET_BITS(struct amap_be_default_pdu_context,
+                             cq_id_recv, ctxt, cq->id);
+       } else {
+               AMAP_SET_BITS(struct amap_default_pdu_context_ext,
+                             rx_pdid, ctxt, 0);
+               AMAP_SET_BITS(struct amap_default_pdu_context_ext,
+                             rx_pdid_valid, ctxt, 1);
+               AMAP_SET_BITS(struct amap_default_pdu_context_ext,
+                             ring_size, ctxt,
+                             be_encoded_q_len(length /
+                             sizeof(struct phys_addr)));
+               AMAP_SET_BITS(struct amap_default_pdu_context_ext,
+                             default_buffer_size, ctxt, entry_size);
+               AMAP_SET_BITS(struct amap_default_pdu_context_ext,
+                             cq_id_recv, ctxt, cq->id);
+       }
 
        be_dws_cpu_to_le(ctxt, sizeof(req->context));
 
index 23397d51ac54f5f9550f15b3b4571ed8342ee762..99073086dfe06522621e57b4e8a8e2d968f7c1c7 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -52,6 +52,10 @@ struct be_mcc_wrb {
 
 /* Completion Status */
 #define MCC_STATUS_SUCCESS 0x0
+#define MCC_STATUS_FAILED 0x1
+#define MCC_STATUS_ILLEGAL_REQUEST 0x2
+#define MCC_STATUS_ILLEGAL_FIELD 0x3
+#define MCC_STATUS_INSUFFICIENT_BUFFER 0x4
 
 #define CQE_STATUS_COMPL_MASK 0xFFFF
 #define CQE_STATUS_COMPL_SHIFT 0       /* bits 0 - 15 */
@@ -118,7 +122,8 @@ struct be_async_event_trailer {
 
 enum {
        ASYNC_EVENT_LINK_DOWN = 0x0,
-       ASYNC_EVENT_LINK_UP = 0x1
+       ASYNC_EVENT_LINK_UP = 0x1,
+       ASYNC_EVENT_LOGICAL = 0x2
 };
 
 /**
@@ -130,6 +135,9 @@ struct be_async_event_link_state {
        u8 port_link_status;
        u8 port_duplex;
        u8 port_speed;
+#define BEISCSI_PHY_LINK_FAULT_NONE    0x00
+#define BEISCSI_PHY_LINK_FAULT_LOCAL   0x01
+#define BEISCSI_PHY_LINK_FAULT_REMOTE  0x02
        u8 port_fault;
        u8 rsvd0[7];
        struct be_async_event_trailer trailer;
@@ -697,6 +705,7 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
                        uint32_t tag, struct be_mcc_wrb **wrb, void *cmd_va);
 /*ISCSI Functuions */
 int be_cmd_fw_initialize(struct be_ctrl_info *ctrl);
+int be_cmd_fw_uninit(struct be_ctrl_info *ctrl);
 
 struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem);
 struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba);
@@ -751,6 +760,18 @@ struct amap_be_default_pdu_context {
        u8 rsvd4[32];           /* dword 3 */
 } __packed;
 
+struct amap_default_pdu_context_ext {
+       u8 rsvd0[16];   /* dword 0 */
+       u8 ring_size[4];    /* dword 0 */
+       u8 rsvd1[12];   /* dword 0 */
+       u8 rsvd2[22];   /* dword 1 */
+       u8 rx_pdid[9];  /* dword 1 */
+       u8 rx_pdid_valid;   /* dword 1 */
+       u8 default_buffer_size[16]; /* dword 2 */
+       u8 cq_id_recv[16];  /* dword 2 */
+       u8 rsvd3[32];   /* dword 3 */
+} __packed;
+
 struct be_defq_create_req {
        struct be_cmd_req_hdr hdr;
        u16 num_pages;
@@ -896,7 +917,7 @@ struct amap_it_dmsg_cqe_v2 {
  * stack to notify the
  * controller of a posted Work Request Block
  */
-#define DB_WRB_POST_CID_MASK           0x3FF   /* bits 0 - 9 */
+#define DB_WRB_POST_CID_MASK           0xFFFF  /* bits 0 - 15 */
 #define DB_DEF_PDU_WRB_INDEX_MASK      0xFF    /* bits 0 - 9 */
 
 #define DB_DEF_PDU_WRB_INDEX_SHIFT     16
index 9014690fe841f08e8993bcfd2153cba785f6135e..ef36be003f67a49f0d37cf1ed1136c87894bd942 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -161,7 +161,9 @@ static int beiscsi_bindconn_cid(struct beiscsi_hba *phba,
                                struct beiscsi_conn *beiscsi_conn,
                                unsigned int cid)
 {
-       if (phba->conn_table[cid]) {
+       uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
+
+       if (phba->conn_table[cri_index]) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
                            "BS_%d : Connection table already occupied. Detected clash\n");
 
@@ -169,9 +171,9 @@ static int beiscsi_bindconn_cid(struct beiscsi_hba *phba,
        } else {
                beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
                            "BS_%d : phba->conn_table[%d]=%p(beiscsi_conn)\n",
-                           cid, beiscsi_conn);
+                           cri_index, beiscsi_conn);
 
-               phba->conn_table[cid] = beiscsi_conn;
+               phba->conn_table[cri_index] = beiscsi_conn;
        }
        return 0;
 }
@@ -990,9 +992,27 @@ static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid)
 static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep)
 {
        struct beiscsi_hba *phba = beiscsi_ep->phba;
+       struct beiscsi_conn *beiscsi_conn;
 
        beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
        beiscsi_ep->phba = NULL;
+       phba->ep_array[BE_GET_CRI_FROM_CID
+                      (beiscsi_ep->ep_cid)] = NULL;
+
+       /**
+        * Check if any connection resource allocated by driver
+        * is to be freed. This case occurs when target redirection
+        * or connection retry is done.
+        **/
+       if (!beiscsi_ep->conn)
+               return;
+
+       beiscsi_conn = beiscsi_ep->conn;
+       if (beiscsi_conn->login_in_progress) {
+               beiscsi_free_mgmt_task_handles(beiscsi_conn,
+                                              beiscsi_conn->task);
+               beiscsi_conn->login_in_progress = 0;
+       }
 }
 
 /**
@@ -1009,7 +1029,6 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
 {
        struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
        struct beiscsi_hba *phba = beiscsi_ep->phba;
-       struct be_mcc_wrb *wrb;
        struct tcp_connect_and_offload_out *ptcpcnct_out;
        struct be_dma_mem nonemb_cmd;
        unsigned int tag;
@@ -1029,15 +1048,8 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
                    "BS_%d : In beiscsi_open_conn, ep_cid=%d\n",
                    beiscsi_ep->ep_cid);
 
-       phba->ep_array[beiscsi_ep->ep_cid -
-                      phba->fw_config.iscsi_cid_start] = ep;
-       if (beiscsi_ep->ep_cid > (phba->fw_config.iscsi_cid_start +
-                                 phba->params.cxns_per_ctrl * 2)) {
-
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
-                           "BS_%d : Failed in allocate iscsi cid\n");
-               goto free_ep;
-       }
+       phba->ep_array[BE_GET_CRI_FROM_CID
+                      (beiscsi_ep->ep_cid)] = ep;
 
        beiscsi_ep->cid_vld = 0;
        nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
@@ -1049,24 +1061,24 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
                            "BS_%d : Failed to allocate memory for"
                            " mgmt_open_connection\n");
 
-               beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
+               beiscsi_free_ep(beiscsi_ep);
                return -ENOMEM;
        }
        nonemb_cmd.size = sizeof(struct tcp_connect_and_offload_in);
        memset(nonemb_cmd.va, 0, nonemb_cmd.size);
        tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd);
-       if (!tag) {
+       if (tag <= 0) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
                            "BS_%d : mgmt_open_connection Failed for cid=%d\n",
                            beiscsi_ep->ep_cid);
 
-               beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
                pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
                                    nonemb_cmd.va, nonemb_cmd.dma);
+               beiscsi_free_ep(beiscsi_ep);
                return -EAGAIN;
        }
 
-       ret = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+       ret = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
        if (ret) {
                beiscsi_log(phba, KERN_ERR,
                            BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
@@ -1074,10 +1086,11 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
 
                pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
                            nonemb_cmd.va, nonemb_cmd.dma);
-               goto free_ep;
+               beiscsi_free_ep(beiscsi_ep);
+               return -EBUSY;
        }
 
-       ptcpcnct_out = embedded_payload(wrb);
+       ptcpcnct_out = (struct tcp_connect_and_offload_out *)nonemb_cmd.va;
        beiscsi_ep = ep->dd_data;
        beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle;
        beiscsi_ep->cid_vld = 1;
@@ -1087,10 +1100,6 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
        pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
                            nonemb_cmd.va, nonemb_cmd.dma);
        return 0;
-
-free_ep:
-       beiscsi_free_ep(beiscsi_ep);
-       return -EBUSY;
 }
 
 /**
@@ -1119,6 +1128,13 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
                return ERR_PTR(ret);
        }
 
+       if (beiscsi_error(phba)) {
+               ret = -EIO;
+               beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+                           "BS_%d : The FW state Not Stable!!!\n");
+               return ERR_PTR(ret);
+       }
+
        if (phba->state != BE_ADAPTER_UP) {
                ret = -EBUSY;
                beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
@@ -1201,8 +1217,10 @@ static int beiscsi_close_conn(struct  beiscsi_endpoint *beiscsi_ep, int flag)
 static int beiscsi_unbind_conn_to_cid(struct beiscsi_hba *phba,
                                      unsigned int cid)
 {
-       if (phba->conn_table[cid])
-               phba->conn_table[cid] = NULL;
+       uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
+
+       if (phba->conn_table[cri_index])
+               phba->conn_table[cri_index] = NULL;
        else {
                beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
                            "BS_%d : Connection table Not occupied.\n");
index 38eab7232159dd7ec412e798a3a521cc8bab862f..31ddc84943989b94eaa4682b91d1a8461c7d066c 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
index 4e2733d2300365761e7dba5df79e3a1b606c67e2..d24a2867bc21cb885506f9d35d0fbe397e27400a 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -153,10 +153,14 @@ BEISCSI_RW_ATTR(log_enable, 0x00,
 
 DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);
 DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL);
+DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL);
+DEVICE_ATTR(beiscsi_active_cid_count, S_IRUGO, beiscsi_active_cid_disp, NULL);
 struct device_attribute *beiscsi_attrs[] = {
        &dev_attr_beiscsi_log_enable,
        &dev_attr_beiscsi_drvr_ver,
        &dev_attr_beiscsi_adapter_family,
+       &dev_attr_beiscsi_fw_ver,
+       &dev_attr_beiscsi_active_cid_count,
        NULL,
 };
 
@@ -702,7 +706,7 @@ static void beiscsi_get_params(struct beiscsi_hba *phba)
                                    + BE2_TMFS
                                    + BE2_NOPOUT_REQ));
        phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
-       phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2;
+       phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count;
        phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
        phba->params.num_sge_per_io = BE2_SGE;
        phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
@@ -1032,7 +1036,6 @@ static void hwi_ring_cq_db(struct beiscsi_hba *phba,
 static unsigned int
 beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
                          struct beiscsi_hba *phba,
-                         unsigned short cid,
                          struct pdu_base *ppdu,
                          unsigned long pdu_len,
                          void *pbuffer, unsigned long buf_len)
@@ -1144,9 +1147,10 @@ struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
        struct hwi_wrb_context *pwrb_context;
        struct hwi_controller *phwi_ctrlr;
        struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;
+       uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
 
        phwi_ctrlr = phba->phwi_ctrlr;
-       pwrb_context = &phwi_ctrlr->wrb_context[cid];
+       pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
        if (pwrb_context->wrb_handles_available >= 2) {
                pwrb_handle = pwrb_context->pwrb_handle_base[
                                            pwrb_context->alloc_index];
@@ -1322,8 +1326,9 @@ be_complete_logout(struct beiscsi_conn *beiscsi_conn,
        hdr->t2retain = 0;
        hdr->flags = csol_cqe->i_flags;
        hdr->response = csol_cqe->i_resp;
-       hdr->exp_cmdsn = csol_cqe->exp_cmdsn;
-       hdr->max_cmdsn = (csol_cqe->exp_cmdsn + csol_cqe->cmd_wnd - 1);
+       hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
+       hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
+                                    csol_cqe->cmd_wnd - 1);
 
        hdr->dlength[0] = 0;
        hdr->dlength[1] = 0;
@@ -1346,9 +1351,9 @@ be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
        hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
        hdr->flags = csol_cqe->i_flags;
        hdr->response = csol_cqe->i_resp;
-       hdr->exp_cmdsn = csol_cqe->exp_cmdsn;
-       hdr->max_cmdsn = (csol_cqe->exp_cmdsn +
-                         csol_cqe->cmd_wnd - 1);
+       hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
+       hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
+                                    csol_cqe->cmd_wnd - 1);
 
        hdr->itt = io_task->libiscsi_itt;
        __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
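
Note: these hunks (and the matching one in be_complete_nopin_resp below) store exp_cmdsn/max_cmdsn in wire order because the response header is handed to libiscsi, which byte-swaps the fields back before updating the command window. An illustration of the problem being fixed, assuming a little-endian host; this is commentary, not code from the commit:

        /* before: a host-order value was stored in a __be32 header field,
         * so libiscsi's be32_to_cpu() produced a byte-swapped window */
        hdr->exp_cmdsn = csol_cqe->exp_cmdsn;                 /* wrong  */
        /* after: convert once here, libiscsi converts back on completion */
        hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);    /* right  */
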
@@ -1363,35 +1368,29 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
        struct hwi_controller *phwi_ctrlr;
        struct iscsi_task *task;
        struct beiscsi_io_task *io_task;
-       struct iscsi_conn *conn = beiscsi_conn->conn;
-       struct iscsi_session *session = conn->session;
-       uint16_t wrb_index, cid;
+       uint16_t wrb_index, cid, cri_index;
 
        phwi_ctrlr = phba->phwi_ctrlr;
-       if (chip_skh_r(phba->pcidev)) {
-               wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
+       if (is_chip_be2_be3r(phba)) {
+               wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
                                          wrb_idx, psol);
-               cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
+               cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
                                    cid, psol);
        } else {
-               wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
+               wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
                                          wrb_idx, psol);
-               cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
+               cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
                                    cid, psol);
        }
 
-       pwrb_context = &phwi_ctrlr->wrb_context[
-                       cid - phba->fw_config.iscsi_cid_start];
+       cri_index = BE_GET_CRI_FROM_CID(cid);
+       pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
        pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index];
        task = pwrb_handle->pio_handle;
 
        io_task = task->dd_data;
-       spin_lock_bh(&phba->mgmt_sgl_lock);
-       free_mgmt_sgl_handle(phba, io_task->psgl_handle);
-       spin_unlock_bh(&phba->mgmt_sgl_lock);
-       spin_lock_bh(&session->lock);
-       free_wrb_handle(phba, pwrb_context, pwrb_handle);
-       spin_unlock_bh(&session->lock);
+       memset(io_task->pwrb_handle->pwrb, 0, sizeof(struct iscsi_wrb));
+       iscsi_put_task(task);
 }
 
 static void
@@ -1406,8 +1405,8 @@ be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
        hdr = (struct iscsi_nopin *)task->hdr;
        hdr->flags = csol_cqe->i_flags;
        hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
-       hdr->max_cmdsn = be32_to_cpu(hdr->exp_cmdsn +
-                        csol_cqe->cmd_wnd - 1);
+       hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
+                                    csol_cqe->cmd_wnd - 1);
 
        hdr->opcode = ISCSI_OP_NOOP_IN;
        hdr->itt = io_task->libiscsi_itt;
@@ -1418,7 +1417,26 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
                struct sol_cqe *psol,
                struct common_sol_cqe *csol_cqe)
 {
-       if (chip_skh_r(phba->pcidev)) {
+       if (is_chip_be2_be3r(phba)) {
+               csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe,
+                                                   i_exp_cmd_sn, psol);
+               csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe,
+                                                 i_res_cnt, psol);
+               csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe,
+                                                 i_cmd_wnd, psol);
+               csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe,
+                                                   wrb_index, psol);
+               csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe,
+                                             cid, psol);
+               csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe,
+                                                hw_sts, psol);
+               csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe,
+                                                i_resp, psol);
+               csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe,
+                                               i_sts, psol);
+               csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe,
+                                                 i_flags, psol);
+       } else {
                csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2,
                                                    i_exp_cmd_sn, psol);
                csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2,
@@ -1429,7 +1447,7 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
                                              cid, psol);
                csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
                                                 hw_sts, psol);
-               csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe,
+               csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2,
                                                  i_cmd_wnd, psol);
                if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
                                  cmd_cmpl, psol))
@@ -1445,25 +1463,6 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
                if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
                                  o, psol))
                        csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW;
-       } else {
-               csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe,
-                                                   i_exp_cmd_sn, psol);
-               csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe,
-                                                 i_res_cnt, psol);
-               csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe,
-                                                 i_cmd_wnd, psol);
-               csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe,
-                                                   wrb_index, psol);
-               csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe,
-                                             cid, psol);
-               csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe,
-                                                hw_sts, psol);
-               csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe,
-                                                i_resp, psol);
-               csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe,
-                                               i_sts, psol);
-               csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe,
-                                                 i_flags, psol);
        }
 }
 
@@ -1480,14 +1479,15 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
        struct iscsi_conn *conn = beiscsi_conn->conn;
        struct iscsi_session *session = conn->session;
        struct common_sol_cqe csol_cqe = {0};
+       uint16_t cri_index = 0;
 
        phwi_ctrlr = phba->phwi_ctrlr;
 
        /* Copy the elements to a common structure */
        adapter_get_sol_cqe(phba, psol, &csol_cqe);
 
-       pwrb_context = &phwi_ctrlr->wrb_context[
-                       csol_cqe.cid - phba->fw_config.iscsi_cid_start];
+       cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid);
+       pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
 
        pwrb_handle = pwrb_context->pwrb_handle_basestd[
                      csol_cqe.wrb_index];
@@ -1561,15 +1561,15 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
        unsigned char is_header = 0;
        unsigned int index, dpl;
 
-       if (chip_skh_r(phba->pcidev)) {
-               dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
+       if (is_chip_be2_be3r(phba)) {
+               dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
                                    dpl, pdpdu_cqe);
-               index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
+               index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
                                      index, pdpdu_cqe);
        } else {
-               dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
+               dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
                                    dpl, pdpdu_cqe);
-               index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
+               index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
                                      index, pdpdu_cqe);
        }
 
@@ -1613,8 +1613,8 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
 
        WARN_ON(!pasync_handle);
 
-       pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
-                                            phba->fw_config.iscsi_cid_start;
+       pasync_handle->cri =
+                       BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid);
        pasync_handle->is_header = is_header;
        pasync_handle->buffer_len = dpl;
        *pcq_index = index;
@@ -1856,8 +1856,6 @@ hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
        }
 
        status = beiscsi_process_async_pdu(beiscsi_conn, phba,
-                                          (beiscsi_conn->beiscsi_conn_cid -
-                                           phba->fw_config.iscsi_cid_start),
                                            phdr, hdr_len, pfirst_buffer,
                                            offset);
 
@@ -2011,6 +2009,7 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
        unsigned int num_processed = 0;
        unsigned int tot_nump = 0;
        unsigned short code = 0, cid = 0;
+       uint16_t cri_index = 0;
        struct beiscsi_conn *beiscsi_conn;
        struct beiscsi_endpoint *beiscsi_ep;
        struct iscsi_endpoint *ep;
@@ -2028,7 +2027,9 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
                         32] & CQE_CODE_MASK);
 
                 /* Get the CID */
-               if (chip_skh_r(phba->pcidev)) {
+               if (is_chip_be2_be3r(phba)) {
+                       cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol);
+               } else {
                        if ((code == DRIVERMSG_NOTIFY) ||
                            (code == UNSOL_HDR_NOTIFY) ||
                            (code == UNSOL_DATA_NOTIFY))
@@ -2038,10 +2039,10 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
                         else
                                 cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
                                                     cid, sol);
-                  } else
-                        cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol);
+               }
 
-               ep = phba->ep_array[cid - phba->fw_config.iscsi_cid_start];
+               cri_index = BE_GET_CRI_FROM_CID(cid);
+               ep = phba->ep_array[cri_index];
                beiscsi_ep = ep->dd_data;
                beiscsi_conn = beiscsi_ep->conn;
 
@@ -2191,7 +2192,7 @@ void beiscsi_process_all_cqs(struct work_struct *work)
 
 static int be_iopoll(struct blk_iopoll *iop, int budget)
 {
-       static unsigned int ret;
+       unsigned int ret;
        struct beiscsi_hba *phba;
        struct be_eq_obj *pbe_eq;
 
@@ -2416,11 +2417,11 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
                /* Check for the data_count */
                dsp_value = (task->data_count) ? 1 : 0;
 
-               if (chip_skh_r(phba->pcidev))
-                       AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp,
+               if (is_chip_be2_be3r(phba))
+                       AMAP_SET_BITS(struct amap_iscsi_wrb, dsp,
                                      pwrb, dsp_value);
                else
-                       AMAP_SET_BITS(struct amap_iscsi_wrb, dsp,
+                       AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp,
                                      pwrb, dsp_value);
 
                /* Map addr only if there is data_count */
@@ -2538,8 +2539,9 @@ static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
 
 static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
 {
-       struct be_mem_descriptor *mem_descr;
        dma_addr_t bus_add;
+       struct hwi_controller *phwi_ctrlr;
+       struct be_mem_descriptor *mem_descr;
        struct mem_array *mem_arr, *mem_arr_orig;
        unsigned int i, j, alloc_size, curr_alloc_size;
 
@@ -2547,9 +2549,18 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
        if (!phba->phwi_ctrlr)
                return -ENOMEM;
 
+       /* Allocate memory for wrb_context */
+       phwi_ctrlr = phba->phwi_ctrlr;
+       phwi_ctrlr->wrb_context = kzalloc(sizeof(struct hwi_wrb_context) *
+                                         phba->params.cxns_per_ctrl,
+                                         GFP_KERNEL);
+       if (!phwi_ctrlr->wrb_context)
+               return -ENOMEM;
+
        phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
                                 GFP_KERNEL);
        if (!phba->init_mem) {
+               kfree(phwi_ctrlr->wrb_context);
                kfree(phba->phwi_ctrlr);
                return -ENOMEM;
        }
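
Note: the per-connection WRB contexts are now allocated at run time, sized by the controller's actual connection count instead of the fixed BE2_MAX_SESSIONS * 2 array that is removed from struct hwi_controller later in this diff. An equivalent, purely illustrative form of the same allocation using kcalloc(), which adds multiplication-overflow checking, would be:

        phwi_ctrlr->wrb_context = kcalloc(phba->params.cxns_per_ctrl,
                                          sizeof(struct hwi_wrb_context),
                                          GFP_KERNEL);
        if (!phwi_ctrlr->wrb_context)
                return -ENOMEM;
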
@@ -2558,6 +2569,7 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
                               GFP_KERNEL);
        if (!mem_arr_orig) {
                kfree(phba->init_mem);
+               kfree(phwi_ctrlr->wrb_context);
                kfree(phba->phwi_ctrlr);
                return -ENOMEM;
        }
@@ -2628,6 +2640,7 @@ free_mem:
        }
        kfree(mem_arr_orig);
        kfree(phba->init_mem);
+       kfree(phba->phwi_ctrlr->wrb_context);
        kfree(phba->phwi_ctrlr);
        return -ENOMEM;
 }
@@ -2666,6 +2679,7 @@ static void iscsi_init_global_templates(struct beiscsi_hba *phba)
 static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
 {
        struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
+       struct hwi_context_memory *phwi_ctxt;
        struct wrb_handle *pwrb_handle = NULL;
        struct hwi_controller *phwi_ctrlr;
        struct hwi_wrb_context *pwrb_context;
@@ -2680,7 +2694,18 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
        mem_descr_wrb += HWI_MEM_WRB;
        phwi_ctrlr = phba->phwi_ctrlr;
 
-       for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
+       /* Allocate memory for WRBQ */
+       phwi_ctxt = phwi_ctrlr->phwi_ctxt;
+       phwi_ctxt->be_wrbq = kzalloc(sizeof(struct be_queue_info) *
+                                    phba->fw_config.iscsi_cid_count,
+                                    GFP_KERNEL);
+       if (!phwi_ctxt->be_wrbq) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : WRBQ Mem Alloc Failed\n");
+               return -ENOMEM;
+       }
+
+       for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
                pwrb_context = &phwi_ctrlr->wrb_context[index];
                pwrb_context->pwrb_handle_base =
                                kzalloc(sizeof(struct wrb_handle *) *
@@ -2723,7 +2748,7 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
                }
        }
        idx = 0;
-       for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
+       for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
                pwrb_context = &phwi_ctrlr->wrb_context[index];
                if (!num_cxn_wrb) {
                        pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
@@ -2752,7 +2777,7 @@ init_wrb_hndl_failed:
        return -ENOMEM;
 }
 
-static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
+static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
 {
        struct hwi_controller *phwi_ctrlr;
        struct hba_parameters *p = &phba->params;
@@ -2770,6 +2795,15 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
        pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
        memset(pasync_ctx, 0, sizeof(*pasync_ctx));
 
+       pasync_ctx->async_entry = kzalloc(sizeof(struct hwi_async_entry) *
+                                         phba->fw_config.iscsi_cid_count,
+                                         GFP_KERNEL);
+       if (!pasync_ctx->async_entry) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : hwi_init_async_pdu_ctx Mem Alloc Failed\n");
+               return -ENOMEM;
+       }
+
        pasync_ctx->num_entries = p->asyncpdus_per_ctrl;
        pasync_ctx->buffer_size = p->defpdu_hdr_sz;
 
@@ -2934,6 +2968,8 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
        pasync_ctx->async_header.ep_read_ptr = -1;
        pasync_ctx->async_data.host_write_ptr = 0;
        pasync_ctx->async_data.ep_read_ptr = -1;
+
+       return 0;
 }
 
 static int
@@ -3293,6 +3329,7 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
        void *wrb_vaddr;
        struct be_dma_mem sgl;
        struct be_mem_descriptor *mem_descr;
+       struct hwi_wrb_context *pwrb_context;
        int status;
 
        idx = 0;
@@ -3351,8 +3388,9 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
                        kfree(pwrb_arr);
                        return status;
                }
-               phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i].
-                                                                  id;
+               pwrb_context = &phwi_ctrlr->wrb_context[i];
+               pwrb_context->cid = phwi_context->be_wrbq[i].id;
+               BE_SET_CID_TO_CRI(i, pwrb_context->cid);
        }
        kfree(pwrb_arr);
        return 0;
@@ -3365,7 +3403,7 @@ static void free_wrb_handles(struct beiscsi_hba *phba)
        struct hwi_wrb_context *pwrb_context;
 
        phwi_ctrlr = phba->phwi_ctrlr;
-       for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
+       for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
                pwrb_context = &phwi_ctrlr->wrb_context[index];
                kfree(pwrb_context->pwrb_handle_base);
                kfree(pwrb_context->pwrb_handle_basestd);
@@ -3394,6 +3432,7 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
        struct be_ctrl_info *ctrl = &phba->ctrl;
        struct hwi_controller *phwi_ctrlr;
        struct hwi_context_memory *phwi_context;
+       struct hwi_async_pdu_context *pasync_ctx;
        int i, eq_num;
 
        phwi_ctrlr = phba->phwi_ctrlr;
@@ -3403,6 +3442,7 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
                if (q->created)
                        beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
        }
+       kfree(phwi_context->be_wrbq);
        free_wrb_handles(phba);
 
        q = &phwi_context->be_def_hdrq;
@@ -3430,6 +3470,10 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
                        beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
        }
        be_mcc_queues_destroy(phba);
+
+       pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
+       kfree(pasync_ctx->async_entry);
+       be_cmd_fw_uninit(ctrl);
 }
 
 static int be_mcc_queues_create(struct beiscsi_hba *phba,
@@ -3607,7 +3651,12 @@ static int hwi_init_controller(struct beiscsi_hba *phba)
        if (beiscsi_init_wrb_handle(phba))
                return -ENOMEM;
 
-       hwi_init_async_pdu_ctx(phba);
+       if (hwi_init_async_pdu_ctx(phba)) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : hwi_init_async_pdu_ctx failed\n");
+               return -ENOMEM;
+       }
+
        if (hwi_init_port(phba) != 0) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                            "BM_%d : hwi_init_controller failed\n");
@@ -3637,6 +3686,7 @@ static void beiscsi_free_mem(struct beiscsi_hba *phba)
                mem_descr++;
        }
        kfree(phba->init_mem);
+       kfree(phba->phwi_ctrlr->wrb_context);
        kfree(phba->phwi_ctrlr);
 }
 
@@ -3769,7 +3819,7 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
 
 static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
 {
-       int i, new_cid;
+       int i;
 
        phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
                                  GFP_KERNEL);
@@ -3780,19 +3830,33 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
                return -ENOMEM;
        }
        phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
-                                phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
+                                phba->params.cxns_per_ctrl, GFP_KERNEL);
        if (!phba->ep_array) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                            "BM_%d : Failed to allocate memory in "
                            "hba_setup_cid_tbls\n");
                kfree(phba->cid_array);
+               phba->cid_array = NULL;
                return -ENOMEM;
        }
-       new_cid = phba->fw_config.iscsi_cid_start;
-       for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
-               phba->cid_array[i] = new_cid;
-               new_cid += 2;
+
+       phba->conn_table = kzalloc(sizeof(struct beiscsi_conn *) *
+                                  phba->params.cxns_per_ctrl, GFP_KERNEL);
+       if (!phba->conn_table) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : Failed to allocate memory in"
+                           "hba_setup_cid_tbls\n");
+
+               kfree(phba->cid_array);
+               kfree(phba->ep_array);
+               phba->cid_array = NULL;
+               phba->ep_array = NULL;
+               return -ENOMEM;
        }
+
+       for (i = 0; i < phba->params.cxns_per_ctrl; i++)
+               phba->cid_array[i] = phba->phwi_ctrlr->wrb_context[i].cid;
+
        phba->avlbl_cids = phba->params.cxns_per_ctrl;
        return 0;
 }
@@ -4062,6 +4126,53 @@ static void beiscsi_clean_port(struct beiscsi_hba *phba)
        kfree(phba->eh_sgl_hndl_base);
        kfree(phba->cid_array);
        kfree(phba->ep_array);
+       kfree(phba->conn_table);
+}
+
+/**
+ * beiscsi_free_mgmt_task_handles()- Free driver CXN resources
+ * @beiscsi_conn: ptr to the conn to be cleaned up
+ * @task: ptr to iscsi_task resource to be freed.
+ *
+ * Free driver mgmt resources binded to CXN.
+ **/
+void
+beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
+                               struct iscsi_task *task)
+{
+       struct beiscsi_io_task *io_task;
+       struct beiscsi_hba *phba = beiscsi_conn->phba;
+       struct hwi_wrb_context *pwrb_context;
+       struct hwi_controller *phwi_ctrlr;
+       uint16_t cri_index = BE_GET_CRI_FROM_CID(
+                               beiscsi_conn->beiscsi_conn_cid);
+
+       phwi_ctrlr = phba->phwi_ctrlr;
+       pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
+
+       io_task = task->dd_data;
+
+       if (io_task->pwrb_handle) {
+               memset(io_task->pwrb_handle->pwrb, 0,
+                      sizeof(struct iscsi_wrb));
+               free_wrb_handle(phba, pwrb_context,
+                               io_task->pwrb_handle);
+               io_task->pwrb_handle = NULL;
+       }
+
+       if (io_task->psgl_handle) {
+               spin_lock_bh(&phba->mgmt_sgl_lock);
+               free_mgmt_sgl_handle(phba,
+                                    io_task->psgl_handle);
+               io_task->psgl_handle = NULL;
+               spin_unlock_bh(&phba->mgmt_sgl_lock);
+       }
+
+       if (io_task->mtask_addr)
+               pci_unmap_single(phba->pcidev,
+                                io_task->mtask_addr,
+                                io_task->mtask_data_count,
+                                PCI_DMA_TODEVICE);
 }
 
 /**
@@ -4078,10 +4189,11 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
        struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
        struct hwi_wrb_context *pwrb_context;
        struct hwi_controller *phwi_ctrlr;
+       uint16_t cri_index = BE_GET_CRI_FROM_CID(
+                            beiscsi_conn->beiscsi_conn_cid);
 
        phwi_ctrlr = phba->phwi_ctrlr;
-       pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
-                       - phba->fw_config.iscsi_cid_start];
+       pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
 
        if (io_task->cmd_bhs) {
                pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
@@ -4103,27 +4215,8 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
                        io_task->psgl_handle = NULL;
                }
        } else {
-               if (!beiscsi_conn->login_in_progress) {
-                       if (io_task->pwrb_handle) {
-                               free_wrb_handle(phba, pwrb_context,
-                                               io_task->pwrb_handle);
-                               io_task->pwrb_handle = NULL;
-                       }
-                       if (io_task->psgl_handle) {
-                               spin_lock(&phba->mgmt_sgl_lock);
-                               free_mgmt_sgl_handle(phba,
-                                                    io_task->psgl_handle);
-                               spin_unlock(&phba->mgmt_sgl_lock);
-                               io_task->psgl_handle = NULL;
-                       }
-                       if (io_task->mtask_addr) {
-                               pci_unmap_single(phba->pcidev,
-                                                io_task->mtask_addr,
-                                                io_task->mtask_data_count,
-                                                PCI_DMA_TODEVICE);
-                               io_task->mtask_addr = 0;
-                       }
-               }
+               if (!beiscsi_conn->login_in_progress)
+                       beiscsi_free_mgmt_task_handles(beiscsi_conn, task);
        }
 }
 
@@ -4146,15 +4239,14 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
        beiscsi_cleanup_task(task);
        spin_unlock_bh(&session->lock);
 
-       pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
-                                      phba->fw_config.iscsi_cid_start));
+       pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid);
 
        /* Check for the adapter family */
-       if (chip_skh_r(phba->pcidev))
-               beiscsi_offload_cxn_v2(params, pwrb_handle);
-       else
+       if (is_chip_be2_be3r(phba))
                beiscsi_offload_cxn_v0(params, pwrb_handle,
                                       phba->init_mem);
+       else
+               beiscsi_offload_cxn_v2(params, pwrb_handle);
 
        be_dws_le_to_cpu(pwrb_handle->pwrb,
                         sizeof(struct iscsi_target_context_update_wrb));
@@ -4194,6 +4286,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
        struct hwi_wrb_context *pwrb_context;
        struct hwi_controller *phwi_ctrlr;
        itt_t itt;
+       uint16_t cri_index = 0;
        struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
        dma_addr_t paddr;
 
@@ -4223,8 +4316,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
                        goto free_hndls;
                }
                io_task->pwrb_handle = alloc_wrb_handle(phba,
-                                       beiscsi_conn->beiscsi_conn_cid -
-                                       phba->fw_config.iscsi_cid_start);
+                                       beiscsi_conn->beiscsi_conn_cid);
                if (!io_task->pwrb_handle) {
                        beiscsi_log(phba, KERN_ERR,
                                    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
@@ -4236,6 +4328,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
        } else {
                io_task->scsi_cmnd = NULL;
                if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
+                       beiscsi_conn->task = task;
                        if (!beiscsi_conn->login_in_progress) {
                                spin_lock(&phba->mgmt_sgl_lock);
                                io_task->psgl_handle = (struct sgl_handle *)
@@ -4257,8 +4350,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
                                                        io_task->psgl_handle;
                                io_task->pwrb_handle =
                                        alloc_wrb_handle(phba,
-                                       beiscsi_conn->beiscsi_conn_cid -
-                                       phba->fw_config.iscsi_cid_start);
+                                       beiscsi_conn->beiscsi_conn_cid);
                                if (!io_task->pwrb_handle) {
                                        beiscsi_log(phba, KERN_ERR,
                                                    BEISCSI_LOG_IO |
@@ -4278,7 +4370,6 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
                                io_task->pwrb_handle =
                                                beiscsi_conn->plogin_wrb_handle;
                        }
-                       beiscsi_conn->task = task;
                } else {
                        spin_lock(&phba->mgmt_sgl_lock);
                        io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
@@ -4295,8 +4386,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
                        }
                        io_task->pwrb_handle =
                                        alloc_wrb_handle(phba,
-                                       beiscsi_conn->beiscsi_conn_cid -
-                                       phba->fw_config.iscsi_cid_start);
+                                       beiscsi_conn->beiscsi_conn_cid);
                        if (!io_task->pwrb_handle) {
                                beiscsi_log(phba, KERN_ERR,
                                            BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
@@ -4324,12 +4414,13 @@ free_io_hndls:
 free_mgmt_hndls:
        spin_lock(&phba->mgmt_sgl_lock);
        free_mgmt_sgl_handle(phba, io_task->psgl_handle);
+       io_task->psgl_handle = NULL;
        spin_unlock(&phba->mgmt_sgl_lock);
 free_hndls:
        phwi_ctrlr = phba->phwi_ctrlr;
-       pwrb_context = &phwi_ctrlr->wrb_context[
-                       beiscsi_conn->beiscsi_conn_cid -
-                       phba->fw_config.iscsi_cid_start];
+       cri_index = BE_GET_CRI_FROM_CID(
+       beiscsi_conn->beiscsi_conn_cid);
+       pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
        if (io_task->pwrb_handle)
                free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
        io_task->pwrb_handle = NULL;
@@ -4351,7 +4442,6 @@ int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
        unsigned int doorbell = 0;
 
        pwrb = io_task->pwrb_handle->pwrb;
-       memset(pwrb, 0, sizeof(*pwrb));
 
        io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
        io_task->bhs_len = sizeof(struct be_cmd_bhs);
@@ -4465,19 +4555,7 @@ static int beiscsi_mtask(struct iscsi_task *task)
        pwrb = io_task->pwrb_handle->pwrb;
        memset(pwrb, 0, sizeof(*pwrb));
 
-       if (chip_skh_r(phba->pcidev)) {
-               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
-                             be32_to_cpu(task->cmdsn));
-               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
-                             io_task->pwrb_handle->wrb_index);
-               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
-                             io_task->psgl_handle->sgl_index);
-               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb,
-                             task->data_count);
-               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
-                             io_task->pwrb_handle->nxt_wrb_index);
-               pwrb_typeoffset = SKH_WRB_TYPE_OFFSET;
-       } else {
+       if (is_chip_be2_be3r(phba)) {
                AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
                              be32_to_cpu(task->cmdsn));
                AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
@@ -4489,6 +4567,18 @@ static int beiscsi_mtask(struct iscsi_task *task)
                AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
                              io_task->pwrb_handle->nxt_wrb_index);
                pwrb_typeoffset = BE_WRB_TYPE_OFFSET;
+       } else {
+               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
+                             be32_to_cpu(task->cmdsn));
+               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
+                             io_task->pwrb_handle->wrb_index);
+               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
+                             io_task->psgl_handle->sgl_index);
+               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb,
+                             task->data_count);
+               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
+                             io_task->pwrb_handle->nxt_wrb_index);
+               pwrb_typeoffset = SKH_WRB_TYPE_OFFSET;
        }
 
 
@@ -4501,19 +4591,19 @@ static int beiscsi_mtask(struct iscsi_task *task)
        case ISCSI_OP_NOOP_OUT:
                if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
                        ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
-                       if (chip_skh_r(phba->pcidev))
-                               AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+                       if (is_chip_be2_be3r(phba))
+                               AMAP_SET_BITS(struct amap_iscsi_wrb,
                                              dmsg, pwrb, 1);
                        else
-                               AMAP_SET_BITS(struct amap_iscsi_wrb,
+                               AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
                                              dmsg, pwrb, 1);
                } else {
                        ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset);
-                       if (chip_skh_r(phba->pcidev))
-                               AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+                       if (is_chip_be2_be3r(phba))
+                               AMAP_SET_BITS(struct amap_iscsi_wrb,
                                              dmsg, pwrb, 0);
                        else
-                               AMAP_SET_BITS(struct amap_iscsi_wrb,
+                               AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
                                              dmsg, pwrb, 0);
                }
                hwi_write_buffer(pwrb, task);
@@ -4540,9 +4630,9 @@ static int beiscsi_mtask(struct iscsi_task *task)
        }
 
        /* Set the task type */
-       io_task->wrb_type = (chip_skh_r(phba->pcidev)) ?
-               AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb) :
-               AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb);
+       io_task->wrb_type = (is_chip_be2_be3r(phba)) ?
+               AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) :
+               AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb);
 
        doorbell |= cid & DB_WRB_POST_CID_MASK;
        doorbell |= (io_task->pwrb_handle->wrb_index &
@@ -4834,6 +4924,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
        case OC_SKH_ID1:
                phba->generation = BE_GEN4;
                phba->iotask_fn = beiscsi_iotask_v2;
+               break;
        default:
                phba->generation = 0;
        }
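
Note: without the added break, OC_SKH_ID1 adapters fell through into the default case and had phba->generation overwritten with 0 immediately after it was set to BE_GEN4. The corrected switch arm, as applied above:

        case OC_SKH_ID1:
                phba->generation = BE_GEN4;
                phba->iotask_fn = beiscsi_iotask_v2;
                break;          /* previously missing: fell through to generation = 0 */
        default:
                phba->generation = 0;
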
index 5946577d79d6579f6f4fa4ff8c8eaf156caf9ac5..2c06ef3c02aca49974f6bee2487f09b98c2b1d4d 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -36,7 +36,7 @@
 
 #include "be.h"
 #define DRV_NAME               "be2iscsi"
-#define BUILD_STR              "10.0.272.0"
+#define BUILD_STR              "10.0.467.0"
 #define BE_NAME                        "Emulex OneConnect" \
                                "Open-iSCSI Driver version" BUILD_STR
 #define DRV_DESC               BE_NAME " " "Driver"
@@ -66,8 +66,9 @@
 
 #define MAX_CPUS               64
 #define BEISCSI_MAX_NUM_CPUS   7
-#define OC_SKH_MAX_NUM_CPUS    63
+#define OC_SKH_MAX_NUM_CPUS    31
 
+#define BEISCSI_VER_STRLEN 32
 
 #define BEISCSI_SGLIST_ELEMENTS        30
 
@@ -265,7 +266,9 @@ struct invalidate_command_table {
        unsigned short cid;
 } __packed;
 
-#define chip_skh_r(pdev)       (pdev->device == OC_SKH_ID1)
+#define chip_be2(phba)      (phba->generation == BE_GEN2)
+#define chip_be3_r(phba)    (phba->generation == BE_GEN3)
+#define is_chip_be2_be3r(phba) (chip_be3_r(phba) || (chip_be2(phba)))
 struct beiscsi_hba {
        struct hba_parameters params;
        struct hwi_controller *phwi_ctrlr;
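
Note: chip_skh_r() keyed every layout decision off the PCI device ID of one specific adapter; the replacement macros key off phba->generation, which is set once at probe time, and every call site in this commit is flipped so the BE2/BE3-R (v0) structures form the first branch. A typical call site after the change, taken from the hunks above:

        if (is_chip_be2_be3r(phba))
                dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, dpl, pdpdu_cqe);
        else
                dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, dpl, pdpdu_cqe);
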
@@ -304,10 +307,15 @@ struct beiscsi_hba {
        unsigned short avlbl_cids;
        unsigned short cid_alloc;
        unsigned short cid_free;
-       struct beiscsi_conn *conn_table[BE2_MAX_SESSIONS * 2];
        struct list_head hba_queue;
+#define BE_MAX_SESSION 2048
+#define BE_SET_CID_TO_CRI(cri_index, cid) \
+                         (phba->cid_to_cri_map[cid] = cri_index)
+#define BE_GET_CRI_FROM_CID(cid) (phba->cid_to_cri_map[cid])
+       unsigned short cid_to_cri_map[BE_MAX_SESSION];
        unsigned short *cid_array;
        struct iscsi_endpoint **ep_array;
+       struct beiscsi_conn **conn_table;
        struct iscsi_boot_kset *boot_kset;
        struct Scsi_Host *shost;
        struct iscsi_iface *ipv4_iface;
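
Note: the fixed-size conn_table indexed by raw CID is replaced by a dynamically allocated table indexed by CRI, with cid_to_cri_map[] translating between the two. The map is written once when the WRB rings are created and read everywhere else; a minimal sketch of the round trip, using only the macros defined in this hunk:

        /* at ring-creation time (see beiscsi_create_wrb_rings above) */
        BE_SET_CID_TO_CRI(i, pwrb_context->cid);      /* cid_to_cri_map[cid] = i   */

        /* on every later lookup */
        cri_index = BE_GET_CRI_FROM_CID(cid);         /* i = cid_to_cri_map[cid]   */
        pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
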
@@ -339,6 +347,7 @@ struct beiscsi_hba {
        struct delayed_work beiscsi_hw_check_task;
 
        u8 mac_address[ETH_ALEN];
+       char fw_ver_str[BEISCSI_VER_STRLEN];
        char wq_name[20];
        struct workqueue_struct *wq;    /* The actuak work queue */
        struct be_ctrl_info ctrl;
@@ -563,7 +572,7 @@ struct hwi_async_pdu_context {
         * This is a varying size list! Do not add anything
         * after this entry!!
         */
-       struct hwi_async_entry async_entry[BE2_MAX_SESSIONS * 2];
+       struct hwi_async_entry *async_entry;
 };
 
 #define PDUCQE_CODE_MASK       0x0000003F
@@ -749,6 +758,8 @@ void
 free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle);
 
 void beiscsi_process_all_cqs(struct work_struct *work);
+void beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
+                                    struct iscsi_task *task);
 
 static inline bool beiscsi_error(struct beiscsi_hba *phba)
 {
@@ -933,7 +944,7 @@ struct hwi_controller {
        struct sgl_handle *psgl_handle_base;
        unsigned int wrb_mem_index;
 
-       struct hwi_wrb_context wrb_context[BE2_MAX_SESSIONS * 2];
+       struct hwi_wrb_context *wrb_context;
        struct mcc_wrb *pmcc_wrb_base;
        struct be_ring default_pdu_hdr;
        struct be_ring default_pdu_data;
@@ -970,9 +981,7 @@ struct hwi_context_memory {
        struct be_queue_info be_def_hdrq;
        struct be_queue_info be_def_dataq;
 
-       struct be_queue_info be_wrbq[BE2_MAX_SESSIONS];
-       struct be_mcc_wrb_context *pbe_mcc_context;
-
+       struct be_queue_info *be_wrbq;
        struct hwi_async_pdu_context *pasync_ctx;
 };
 
index 55cc9902263dd84bc047fadb429ea9a45ababa50..245a9595a93a131449fd660a7b7d8bd0e9e7f3b5 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -368,6 +368,8 @@ int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
                beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
                            "BM_%d : phba->fw_config.iscsi_features = %d\n",
                            phba->fw_config.iscsi_features);
+               memcpy(phba->fw_ver_str, resp->params.hba_attribs.
+                      firmware_version_string, BEISCSI_VER_STRLEN);
        } else
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                            "BG_%d :  Failed in mgmt_check_supported_fw\n");
@@ -1259,6 +1261,45 @@ beiscsi_drvr_ver_disp(struct device *dev, struct device_attribute *attr,
        return snprintf(buf, PAGE_SIZE, BE_NAME "\n");
 }
 
+/**
+ * beiscsi_fw_ver_disp()- Display Firmware Version
+ * @dev: ptr to device not used.
+ * @attr: device attribute, not used.
+ * @buf: contains formatted text Firmware version
+ *
+ * return
+ * size of the formatted string
+ **/
+ssize_t
+beiscsi_fw_ver_disp(struct device *dev, struct device_attribute *attr,
+                    char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct beiscsi_hba *phba = iscsi_host_priv(shost);
+
+       return snprintf(buf, PAGE_SIZE, "%s\n", phba->fw_ver_str);
+}
+
+/**
+ * beiscsi_active_cid_disp()- Display Sessions Active
+ * @dev: ptr to device not used.
+ * @attr: device attribute, not used.
+ * @buf: contains formatted text Session Count
+ *
+ * return
+ * size of the formatted string
+ **/
+ssize_t
+beiscsi_active_cid_disp(struct device *dev, struct device_attribute *attr,
+                        char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct beiscsi_hba *phba = iscsi_host_priv(shost);
+
+       return snprintf(buf, PAGE_SIZE, "%d\n",
+                      (phba->params.cxns_per_ctrl - phba->avlbl_cids));
+}
+
 /**
  * beiscsi_adap_family_disp()- Display adapter family.
  * @dev: ptr to device to get priv structure
index 2e4968add799f7e91ab53e38edd21cd5e74eaa44..04af7e74fe4884a54a773f0471b264412de726d7 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2012 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -156,25 +156,25 @@ union invalidate_commands_params {
 } __packed;
 
 struct mgmt_hba_attributes {
-       u8 flashrom_version_string[32];
-       u8 manufacturer_name[32];
+       u8 flashrom_version_string[BEISCSI_VER_STRLEN];
+       u8 manufacturer_name[BEISCSI_VER_STRLEN];
        u32 supported_modes;
        u8 seeprom_version_lo;
        u8 seeprom_version_hi;
        u8 rsvd0[2];
        u32 fw_cmd_data_struct_version;
        u32 ep_fw_data_struct_version;
-       u32 future_reserved[12];
+       u8 ncsi_version_string[12];
        u32 default_extended_timeout;
-       u8 controller_model_number[32];
+       u8 controller_model_number[BEISCSI_VER_STRLEN];
        u8 controller_description[64];
-       u8 controller_serial_number[32];
-       u8 ip_version_string[32];
-       u8 firmware_version_string[32];
-       u8 bios_version_string[32];
-       u8 redboot_version_string[32];
-       u8 driver_version_string[32];
-       u8 fw_on_flash_version_string[32];
+       u8 controller_serial_number[BEISCSI_VER_STRLEN];
+       u8 ip_version_string[BEISCSI_VER_STRLEN];
+       u8 firmware_version_string[BEISCSI_VER_STRLEN];
+       u8 bios_version_string[BEISCSI_VER_STRLEN];
+       u8 redboot_version_string[BEISCSI_VER_STRLEN];
+       u8 driver_version_string[BEISCSI_VER_STRLEN];
+       u8 fw_on_flash_version_string[BEISCSI_VER_STRLEN];
        u32 functionalities_supported;
        u16 max_cdblength;
        u8 asic_revision;
@@ -190,7 +190,8 @@ struct mgmt_hba_attributes {
        u32 firmware_post_status;
        u32 hba_mtu[8];
        u8 iscsi_features;
-       u8 future_u8[3];
+       u8 asic_generation;
+       u8 future_u8[2];
        u32 future_u32[3];
 } __packed;
 
@@ -207,7 +208,7 @@ struct mgmt_controller_attributes {
        u64 unique_identifier;
        u8 netfilters;
        u8 rsvd0[3];
-       u8 future_u32[4];
+       u32 future_u32[4];
 } __packed;
 
 struct be_mgmt_controller_attributes {
@@ -311,6 +312,12 @@ int mgmt_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag);
 ssize_t beiscsi_drvr_ver_disp(struct device *dev,
                               struct device_attribute *attr, char *buf);
 
+ssize_t beiscsi_fw_ver_disp(struct device *dev,
+                            struct device_attribute *attr, char *buf);
+
+ssize_t beiscsi_active_cid_disp(struct device *dev,
+                                struct device_attribute *attr, char *buf);
+
 ssize_t beiscsi_adap_family_disp(struct device *dev,
                                  struct device_attribute *attr, char *buf);
 
index 11596b2c4702ae952c7063ab93494a0d5f9bb59f..08b22a901c2590e3aca12ebcfe4484e143c54460 100644 (file)
@@ -2,7 +2,7 @@
 #define _BNX2FC_H_
 /* bnx2fc.h: Broadcom NetXtreme II Linux FCoE offload driver.
  *
- * Copyright (c) 2008 - 2011 Broadcom Corporation
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
 #include "bnx2fc_constants.h"
 
 #define BNX2FC_NAME            "bnx2fc"
-#define BNX2FC_VERSION         "1.0.13"
+#define BNX2FC_VERSION         "1.0.14"
 
 #define PFX                    "bnx2fc: "
 
+#define BCM_CHIP_LEN           16
+
 #define BNX2X_DOORBELL_PCI_BAR         2
 
 #define BNX2FC_MAX_BD_LEN              0xffff
@@ -241,6 +243,8 @@ struct bnx2fc_hba {
        int wait_for_link_down;
        int num_ofld_sess;
        struct list_head vports;
+
+       char chip_num[BCM_CHIP_LEN];
 };
 
 struct bnx2fc_interface {
index bdbbb13b8534c2318464b1b9a803c554af2b17ac..b1c9a4f8caee852f2cbf16b3735a7f7d912bb014 100644 (file)
@@ -3,7 +3,7 @@
  * This file contains helper routines that handle ELS requests
  * and responses.
  *
- * Copyright (c) 2008 - 2011 Broadcom Corporation
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 7dffec1e57158469328e670df896767227261ae7..69ac55495c1d7479d75f79b13fdb22659875b95c 100644 (file)
@@ -3,7 +3,7 @@
  * cnic modules to create FCoE instances, send/receive non-offloaded
  * FIP/FCoE packets, listen to link events etc.
  *
- * Copyright (c) 2008 - 2011 Broadcom Corporation
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
 
 #define DRV_MODULE_NAME                "bnx2fc"
 #define DRV_MODULE_VERSION     BNX2FC_VERSION
-#define DRV_MODULE_RELDATE     "Dec 21, 2012"
+#define DRV_MODULE_RELDATE     "Mar 08, 2013"
 
 
 static char version[] =
@@ -679,6 +679,7 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
 {
        struct fcoe_port *port = lport_priv(lport);
        struct bnx2fc_interface *interface = port->priv;
+       struct bnx2fc_hba *hba = interface->hba;
        struct Scsi_Host *shost = lport->host;
        int rc = 0;
 
@@ -699,8 +700,9 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
        }
        if (!lport->vport)
                fc_host_max_npiv_vports(lport->host) = USHRT_MAX;
-       sprintf(fc_host_symbolic_name(lport->host), "%s v%s over %s",
-               BNX2FC_NAME, BNX2FC_VERSION,
+       snprintf(fc_host_symbolic_name(lport->host), 256,
+                "%s (Broadcom %s) v%s over %s",
+               BNX2FC_NAME, hba->chip_num, BNX2FC_VERSION,
                interface->netdev->name);
 
        return 0;
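
Note: the hard-coded 256 is the size of the fc_host symbolic-name buffer; assuming FC_SYMBOLIC_NAME_SIZE from scsi_transport_fc.h is that constant, an equivalent form that avoids the magic number would be:

        snprintf(fc_host_symbolic_name(lport->host), FC_SYMBOLIC_NAME_SIZE,
                 "%s (Broadcom %s) v%s over %s",
                 BNX2FC_NAME, hba->chip_num, BNX2FC_VERSION,
                 interface->netdev->name);
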
@@ -1656,23 +1658,60 @@ mem_err:
 static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba)
 {
        struct cnic_dev *cnic;
+       struct pci_dev *pdev;
 
        if (!hba->cnic) {
                printk(KERN_ERR PFX "cnic is NULL\n");
                return -ENODEV;
        }
        cnic = hba->cnic;
-       hba->pcidev = cnic->pcidev;
-       if (hba->pcidev)
-               pci_dev_get(hba->pcidev);
+       pdev = hba->pcidev = cnic->pcidev;
+       if (!hba->pcidev)
+               return -ENODEV;
 
+       switch (pdev->device) {
+       case PCI_DEVICE_ID_NX2_57710:
+               strncpy(hba->chip_num, "BCM57710", BCM_CHIP_LEN);
+               break;
+       case PCI_DEVICE_ID_NX2_57711:
+               strncpy(hba->chip_num, "BCM57711", BCM_CHIP_LEN);
+               break;
+       case PCI_DEVICE_ID_NX2_57712:
+       case PCI_DEVICE_ID_NX2_57712_MF:
+       case PCI_DEVICE_ID_NX2_57712_VF:
+               strncpy(hba->chip_num, "BCM57712", BCM_CHIP_LEN);
+               break;
+       case PCI_DEVICE_ID_NX2_57800:
+       case PCI_DEVICE_ID_NX2_57800_MF:
+       case PCI_DEVICE_ID_NX2_57800_VF:
+               strncpy(hba->chip_num, "BCM57800", BCM_CHIP_LEN);
+               break;
+       case PCI_DEVICE_ID_NX2_57810:
+       case PCI_DEVICE_ID_NX2_57810_MF:
+       case PCI_DEVICE_ID_NX2_57810_VF:
+               strncpy(hba->chip_num, "BCM57810", BCM_CHIP_LEN);
+               break;
+       case PCI_DEVICE_ID_NX2_57840:
+       case PCI_DEVICE_ID_NX2_57840_MF:
+       case PCI_DEVICE_ID_NX2_57840_VF:
+       case PCI_DEVICE_ID_NX2_57840_2_20:
+       case PCI_DEVICE_ID_NX2_57840_4_10:
+               strncpy(hba->chip_num, "BCM57840", BCM_CHIP_LEN);
+               break;
+       default:
+               pr_err(PFX "Unknown device id 0x%x\n", pdev->device);
+               break;
+       }
+       pci_dev_get(hba->pcidev);
        return 0;
 }
 
 static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba)
 {
-       if (hba->pcidev)
+       if (hba->pcidev) {
+               hba->chip_num[0] = '\0';
                pci_dev_put(hba->pcidev);
+       }
        hba->pcidev = NULL;
 }
 
index 50510ffe1bf59444b2dcf28cd8c1345b41b3269a..c0d035a8f8f9e79a08a0b313742c2f7b0009ca6f 100644 (file)
@@ -2,7 +2,7 @@
  * This file contains the code that low level functions that interact
  * with 57712 FCoE firmware.
  *
- * Copyright (c) 2008 - 2011 Broadcom Corporation
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -126,7 +126,11 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
        fcoe_init3.error_bit_map_lo = 0xffffffff;
        fcoe_init3.error_bit_map_hi = 0xffffffff;
 
-       fcoe_init3.perf_config = 1;
+       /*
+        * enable both cached connection and cached tasks
+        * 0 = none, 1 = cached connection, 2 = cached tasks, 3 = both
+        */
+       fcoe_init3.perf_config = 3;
 
        kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
        kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
index 723a9a8ba5ee28f48de2e67854205f5749287cee..575142e92d9c5219b10fb286d23a12ee1580db58 100644 (file)
@@ -1,7 +1,7 @@
 /* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver.
  * IO manager and SCSI IO processing.
  *
- * Copyright (c) 2008 - 2011 Broadcom Corporation
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -1270,8 +1270,11 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
 
        spin_lock_bh(&tgt->tgt_lock);
        io_req->wait_for_comp = 0;
-       if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
-                                   &io_req->req_flags))) {
+       if (test_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
+               BNX2FC_IO_DBG(io_req, "IO completed in a different context\n");
+               rc = SUCCESS;
+       } else if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
+                                     &io_req->req_flags))) {
                /* Let the scsi-ml try to recover this command */
                printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
                       io_req->xid);
index c57a3bb8a9fbfe1944e796a67df7cf81efec23c8..4d93177dfb530c4446d959bd97fb71cd5dee86d1 100644 (file)
@@ -2,7 +2,7 @@
  * Handles operations such as session offload/upload etc, and manages
  * session resources such as connection id and qp resources.
  *
- * Copyright (c) 2008 - 2011 Broadcom Corporation
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 0f9c04175b11d4894f34157bf8177d6956c2f9c7..372a67d122d38fc161743c105db229c534d26a89 100644 (file)
@@ -114,7 +114,7 @@ struct csio_lnode_stats {
        uint32_t        n_rnode_match;  /* matched rnode */
        uint32_t        n_dev_loss_tmo; /* Device loss timeout */
        uint32_t        n_fdmi_err;     /* fdmi err */
-       uint32_t        n_evt_fw[PROTO_ERR_IMPL_LOGO];  /* fw events */
+       uint32_t        n_evt_fw[PROTO_ERR_IMPL_LOGO + 1];      /* fw events */
        enum csio_ln_ev n_evt_sm[CSIO_LNE_MAX_EVENT];   /* State m/c events */
        uint32_t        n_rnode_alloc;  /* rnode allocated */
        uint32_t        n_rnode_free;   /* rnode freed */
index 65940096a80d8426eb438392bf544ecee1d5a1e1..43343422122202c9614d4b76539c5a5d20e2fab6 100644 (file)
@@ -63,7 +63,7 @@ struct csio_rnode_stats {
        uint32_t        n_err_nomem;    /* error nomem */
        uint32_t        n_evt_unexp;    /* unexpected event */
        uint32_t        n_evt_drop;     /* unexpected event */
-       uint32_t        n_evt_fw[PROTO_ERR_IMPL_LOGO];  /* fw events */
+       uint32_t        n_evt_fw[PROTO_ERR_IMPL_LOGO + 1];      /* fw events */
        enum csio_rn_ev n_evt_sm[CSIO_RNFE_MAX_EVENT];  /* State m/c events */
        uint32_t        n_lun_rst;      /* Number of resets of
                                         * of LUNs under this
index 98436c3630359f7e348cb496ed4a719575675e2d..b6d1f92ed33cc17e67ff4cbe1b3c424f5f75bd0e 100644 (file)
@@ -38,7 +38,7 @@
 
 #define DRV_NAME               "fnic"
 #define DRV_DESCRIPTION                "Cisco FCoE HBA Driver"
-#define DRV_VERSION            "1.5.0.2"
+#define DRV_VERSION            "1.5.0.22"
 #define PFX                    DRV_NAME ": "
 #define DFX                     DRV_NAME "%d: "
 
@@ -192,6 +192,18 @@ enum fnic_state {
 
 struct mempool;
 
+enum fnic_evt {
+       FNIC_EVT_START_VLAN_DISC = 1,
+       FNIC_EVT_START_FCF_DISC = 2,
+       FNIC_EVT_MAX,
+};
+
+struct fnic_event {
+       struct list_head list;
+       struct fnic *fnic;
+       enum fnic_evt event;
+};
+
 /* Per-instance private data structure */
 struct fnic {
        struct fc_lport *lport;
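
Note: the new fnic_event list and event_work members give the driver a way to defer FIP-related work (VLAN and FCF discovery kicks) out of the fast path. A rough sketch of how such an event might be queued, using only the fields declared above; the allocation, locking, and scheduling details here are assumptions for illustration, not the driver's actual code:

        /* Illustrative only. Queue a VLAN-discovery event onto the per-fnic
         * event list and schedule the handler work. */
        static void fnic_queue_evt_sketch(struct fnic *fnic)
        {
                struct fnic_event *fevt;
                unsigned long flags;

                fevt = kzalloc(sizeof(*fevt), GFP_ATOMIC);
                if (!fevt)
                        return;
                fevt->fnic = fnic;
                fevt->event = FNIC_EVT_START_VLAN_DISC;
                spin_lock_irqsave(&fnic->fnic_lock, flags);
                list_add_tail(&fevt->list, &fnic->evlist);
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                schedule_work(&fnic->event_work);
        }
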
@@ -254,6 +266,18 @@ struct fnic {
        struct sk_buff_head frame_queue;
        struct sk_buff_head tx_queue;
 
+       /*** FIP related data members  -- start ***/
+       void (*set_vlan)(struct fnic *, u16 vlan);
+       struct work_struct      fip_frame_work;
+       struct sk_buff_head     fip_frame_queue;
+       struct timer_list       fip_timer;
+       struct list_head        vlans;
+       spinlock_t              vlans_lock;
+
+       struct work_struct      event_work;
+       struct list_head        evlist;
+       /*** FIP related data members  -- end ***/
+
        /* copy work queue cache line section */
        ____cacheline_aligned struct vnic_wq_copy wq_copy[FNIC_WQ_COPY_MAX];
        /* completion queue cache line section */
@@ -278,6 +302,7 @@ static inline struct fnic *fnic_from_ctlr(struct fcoe_ctlr *fip)
 }
 
 extern struct workqueue_struct *fnic_event_queue;
+extern struct workqueue_struct *fnic_fip_queue;
 extern struct device_attribute *fnic_attrs[];
 
 void fnic_clear_intr_mode(struct fnic *fnic);
@@ -289,6 +314,7 @@ int fnic_send(struct fc_lport *, struct fc_frame *);
 void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf);
 void fnic_handle_frame(struct work_struct *work);
 void fnic_handle_link(struct work_struct *work);
+void fnic_handle_event(struct work_struct *work);
 int fnic_rq_cmpl_handler(struct fnic *fnic, int);
 int fnic_alloc_rq_frame(struct vnic_rq *rq);
 void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
@@ -321,6 +347,12 @@ void fnic_handle_link_event(struct fnic *fnic);
 
 int fnic_is_abts_pending(struct fnic *, struct scsi_cmnd *);
 
+void fnic_handle_fip_frame(struct work_struct *work);
+void fnic_handle_fip_event(struct fnic *fnic);
+void fnic_fcoe_reset_vlans(struct fnic *fnic);
+void fnic_fcoe_evlist_free(struct fnic *fnic);
+extern void fnic_handle_fip_timer(struct fnic *fnic);
+
 static inline int
 fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags)
 {
index 483eb9dbe66366377c5dd4f049c15a160ca2a500..006fa92a02df0b4178f2ce88c73dffd8635cb5fc 100644 (file)
 #include <scsi/libfc.h>
 #include "fnic_io.h"
 #include "fnic.h"
+#include "fnic_fip.h"
 #include "cq_enet_desc.h"
 #include "cq_exch_desc.h"
 
+static u8 fcoe_all_fcfs[ETH_ALEN];
+struct workqueue_struct *fnic_fip_queue;
 struct workqueue_struct *fnic_event_queue;
 
 static void fnic_set_eth_mode(struct fnic *);
+static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
+static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
+static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
+static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
+static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);
 
 void fnic_handle_link(struct work_struct *work)
 {
@@ -69,6 +77,11 @@ void fnic_handle_link(struct work_struct *work)
                                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                                             "link down\n");
                                fcoe_ctlr_link_down(&fnic->ctlr);
+                               if (fnic->config.flags & VFCF_FIP_CAPABLE) {
+                                       /* start FCoE VLAN discovery */
+                                       fnic_fcoe_send_vlan_req(fnic);
+                                       return;
+                               }
                                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                                             "link up\n");
                                fcoe_ctlr_link_up(&fnic->ctlr);
@@ -79,6 +92,11 @@ void fnic_handle_link(struct work_struct *work)
        } else if (fnic->link_status) {
                /* DOWN -> UP */
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+               if (fnic->config.flags & VFCF_FIP_CAPABLE) {
+                       /* start FCoE VLAN discovery */
+                       fnic_fcoe_send_vlan_req(fnic);
+                       return;
+               }
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
                fcoe_ctlr_link_up(&fnic->ctlr);
        } else {
@@ -128,6 +146,441 @@ void fnic_handle_frame(struct work_struct *work)
        }
 }
 
+void fnic_fcoe_evlist_free(struct fnic *fnic)
+{
+       struct fnic_event *fevt = NULL;
+       struct fnic_event *next = NULL;
+       unsigned long flags;
+
+       spin_lock_irqsave(&fnic->fnic_lock, flags);
+       if (list_empty(&fnic->evlist)) {
+               spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+               return;
+       }
+
+       list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
+               list_del(&fevt->list);
+               kfree(fevt);
+       }
+       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+}
+
+void fnic_handle_event(struct work_struct *work)
+{
+       struct fnic *fnic = container_of(work, struct fnic, event_work);
+       struct fnic_event *fevt = NULL;
+       struct fnic_event *next = NULL;
+       unsigned long flags;
+
+       spin_lock_irqsave(&fnic->fnic_lock, flags);
+       if (list_empty(&fnic->evlist)) {
+               spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+               return;
+       }
+
+       list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
+               if (fnic->stop_rx_link_events) {
+                       list_del(&fevt->list);
+                       kfree(fevt);
+                       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+                       return;
+               }
+               /*
+                * If we're in a transitional state, just re-queue and return.
+                * The queue will be serviced when we get to a stable state.
+                */
+               if (fnic->state != FNIC_IN_FC_MODE &&
+                   fnic->state != FNIC_IN_ETH_MODE) {
+                       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+                       return;
+               }
+
+               list_del(&fevt->list);
+               switch (fevt->event) {
+               case FNIC_EVT_START_VLAN_DISC:
+                       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+                       fnic_fcoe_send_vlan_req(fnic);
+                       spin_lock_irqsave(&fnic->fnic_lock, flags);
+                       break;
+               case FNIC_EVT_START_FCF_DISC:
+                       FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+                                 "Start FCF Discovery\n");
+                       fnic_fcoe_start_fcf_disc(fnic);
+                       break;
+               default:
+                       FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+                                 "Unknown event 0x%x\n", fevt->event);
+                       break;
+               }
+               kfree(fevt);
+       }
+       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+}
+
+/**
+ * Check if the Received FIP FLOGI frame is rejected
+ * @fip: The FCoE controller that received the frame
+ * @skb: The received FIP frame
+ *
+ * Returns non-zero if the frame is an ELS reject with the unsupported
+ * command / insufficient resources explanation.
+ */
+static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
+                                        struct sk_buff *skb)
+{
+       struct fc_lport *lport = fip->lp;
+       struct fip_header *fiph;
+       struct fc_frame_header *fh = NULL;
+       struct fip_desc *desc;
+       struct fip_encaps *els;
+       enum fip_desc_type els_dtype = 0;
+       u16 op;
+       u8 els_op;
+       u8 sub;
+
+       size_t els_len = 0;
+       size_t rlen;
+       size_t dlen = 0;
+
+       if (skb_linearize(skb))
+               return 0;
+
+       if (skb->len < sizeof(*fiph))
+               return 0;
+
+       fiph = (struct fip_header *)skb->data;
+       op = ntohs(fiph->fip_op);
+       sub = fiph->fip_subcode;
+
+       if (op != FIP_OP_LS)
+               return 0;
+
+       if (sub != FIP_SC_REP)
+               return 0;
+
+       rlen = ntohs(fiph->fip_dl_len) * 4;
+       if (rlen + sizeof(*fiph) > skb->len)
+               return 0;
+
+       desc = (struct fip_desc *)(fiph + 1);
+       dlen = desc->fip_dlen * FIP_BPW;
+
+       if (desc->fip_dtype == FIP_DT_FLOGI) {
+
+               shost_printk(KERN_DEBUG, lport->host,
+                         " FIP TYPE FLOGI: fab name:%llx "
+                         "vfid:%d map:%x\n",
+                         fip->sel_fcf->fabric_name, fip->sel_fcf->vfid,
+                         fip->sel_fcf->fc_map);
+               if (dlen < sizeof(*els) + sizeof(*fh) + 1)
+                       return 0;
+
+               els_len = dlen - sizeof(*els);
+               els = (struct fip_encaps *)desc;
+               fh = (struct fc_frame_header *)(els + 1);
+               els_dtype = desc->fip_dtype;
+
+               if (!fh)
+                       return 0;
+
+               /*
+                * ELS command code, reason and explanation should be = Reject,
+                * unsupported command and insufficient resource
+                */
+               els_op = *(u8 *)(fh + 1);
+               if (els_op == ELS_LS_RJT) {
+                       shost_printk(KERN_INFO, lport->host,
+                                 "Flogi Request Rejected by Switch\n");
+                       return 1;
+               }
+               shost_printk(KERN_INFO, lport->host,
+                               "Flogi Request Accepted by Switch\n");
+       }
+       return 0;
+}
+
+static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
+{
+       struct fcoe_ctlr *fip = &fnic->ctlr;
+       struct sk_buff *skb;
+       char *eth_fr;
+       int fr_len;
+       struct fip_vlan *vlan;
+       u64 vlan_tov;
+
+       fnic_fcoe_reset_vlans(fnic);
+       fnic->set_vlan(fnic, 0);
+       FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
+                 "Sending VLAN request...\n");
+       skb = dev_alloc_skb(sizeof(struct fip_vlan));
+       if (!skb)
+               return;
+
+       fr_len = sizeof(*vlan);
+       eth_fr = (char *)skb->data;
+       vlan = (struct fip_vlan *)eth_fr;
+
+       memset(vlan, 0, sizeof(*vlan));
+       memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
+       memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN);
+       vlan->eth.h_proto = htons(ETH_P_FIP);
+
+       vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
+       vlan->fip.fip_op = htons(FIP_OP_VLAN);
+       vlan->fip.fip_subcode = FIP_SC_VL_REQ;
+       vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);
+
+       vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
+       vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
+       memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);
+
+       vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
+       vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
+       put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);
+
+       skb_put(skb, sizeof(*vlan));
+       skb->protocol = htons(ETH_P_FIP);
+       skb_reset_mac_header(skb);
+       skb_reset_network_header(skb);
+       fip->send(fip, skb);
+
+       /* set a timer so that we can retry if there is no response */
+       vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
+       mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov));
+}
+
+static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
+{
+       struct fcoe_ctlr *fip = &fnic->ctlr;
+       struct fip_header *fiph;
+       struct fip_desc *desc;
+       u16 vid;
+       size_t rlen;
+       size_t dlen;
+       struct fcoe_vlan *vlan;
+       u64 sol_time;
+       unsigned long flags;
+
+       FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
+                 "Received VLAN response...\n");
+
+       fiph = (struct fip_header *) skb->data;
+
+       FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
+                 "Received VLAN response... OP 0x%x SUB_OP 0x%x\n",
+                 ntohs(fiph->fip_op), fiph->fip_subcode);
+
+       rlen = ntohs(fiph->fip_dl_len) * 4;
+       fnic_fcoe_reset_vlans(fnic);
+       spin_lock_irqsave(&fnic->vlans_lock, flags);
+       desc = (struct fip_desc *)(fiph + 1);
+       while (rlen > 0) {
+               dlen = desc->fip_dlen * FIP_BPW;
+               switch (desc->fip_dtype) {
+               case FIP_DT_VLAN:
+                       vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
+                       shost_printk(KERN_INFO, fnic->lport->host,
+                                 "process_vlan_resp: FIP VLAN %d\n", vid);
+                       vlan = kmalloc(sizeof(*vlan),
+                                                       GFP_ATOMIC);
+                       if (!vlan) {
+                               /* retry from timer */
+                               spin_unlock_irqrestore(&fnic->vlans_lock,
+                                                       flags);
+                               goto out;
+                       }
+                       memset(vlan, 0, sizeof(struct fcoe_vlan));
+                       vlan->vid = vid & 0x0fff;
+                       vlan->state = FIP_VLAN_AVAIL;
+                       list_add_tail(&vlan->list, &fnic->vlans);
+                       break;
+               }
+               desc = (struct fip_desc *)((char *)desc + dlen);
+               rlen -= dlen;
+       }
+
+       /* any VLAN descriptors present ? */
+       if (list_empty(&fnic->vlans)) {
+               /* retry from timer */
+               FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
+                         "No VLAN descriptors in FIP VLAN response\n");
+               spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+               goto out;
+       }
+
+       vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
+       fnic->set_vlan(fnic, vlan->vid);
+       vlan->state = FIP_VLAN_SENT; /* sent now */
+       vlan->sol_count++;
+       spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+
+       /* start the solicitation */
+       fcoe_ctlr_link_up(fip);
+
+       sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
+       mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
+out:
+       return;
+}
+
+static void fnic_fcoe_start_fcf_disc(struct fnic *fnic)
+{
+       unsigned long flags;
+       struct fcoe_vlan *vlan;
+       u64 sol_time;
+
+       spin_lock_irqsave(&fnic->vlans_lock, flags);
+       vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
+       fnic->set_vlan(fnic, vlan->vid);
+       vlan->state = FIP_VLAN_SENT; /* sent now */
+       vlan->sol_count = 1;
+       spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+
+       /* start the solicitation */
+       fcoe_ctlr_link_up(&fnic->ctlr);
+
+       sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
+       mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
+}
+
+static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag)
+{
+       unsigned long flags;
+       struct fcoe_vlan *fvlan;
+
+       spin_lock_irqsave(&fnic->vlans_lock, flags);
+       if (list_empty(&fnic->vlans)) {
+               spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+               return -EINVAL;
+       }
+
+       fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
+       if (fvlan->state == FIP_VLAN_USED) {
+               spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+               return 0;
+       }
+
+       if (fvlan->state == FIP_VLAN_SENT) {
+               fvlan->state = FIP_VLAN_USED;
+               spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+               return 0;
+       }
+       spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+       return -EINVAL;
+}
+
+static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev)
+{
+       struct fnic_event *fevt;
+       unsigned long flags;
+
+       fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC);
+       if (!fevt)
+               return;
+
+       fevt->fnic = fnic;
+       fevt->event = ev;
+
+       spin_lock_irqsave(&fnic->fnic_lock, flags);
+       list_add_tail(&fevt->list, &fnic->evlist);
+       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+       schedule_work(&fnic->event_work);
+}
+
+static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
+{
+       struct fip_header *fiph;
+       int ret = 1;
+       u16 op;
+       u8 sub;
+
+       if (!skb || !(skb->data))
+               return -1;
+
+       if (skb_linearize(skb))
+               goto drop;
+
+       fiph = (struct fip_header *)skb->data;
+       op = ntohs(fiph->fip_op);
+       sub = fiph->fip_subcode;
+
+       if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
+               goto drop;
+
+       if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
+               goto drop;
+
+       if (op == FIP_OP_DISC && sub == FIP_SC_ADV) {
+               if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags)))
+                       goto drop;
+               /* pass it on to fcoe */
+               ret = 1;
+       } else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_REP) {
+               /* set the vlan as used */
+               fnic_fcoe_process_vlan_resp(fnic, skb);
+               ret = 0;
+       } else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
+               /* received CVL request, restart vlan disc */
+               fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
+               /* pass it on to fcoe */
+               ret = 1;
+       }
+drop:
+       return ret;
+}
+
+void fnic_handle_fip_frame(struct work_struct *work)
+{
+       struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
+       unsigned long flags;
+       struct sk_buff *skb;
+       struct ethhdr *eh;
+
+       while ((skb = skb_dequeue(&fnic->fip_frame_queue))) {
+               spin_lock_irqsave(&fnic->fnic_lock, flags);
+               if (fnic->stop_rx_link_events) {
+                       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+                       dev_kfree_skb(skb);
+                       return;
+               }
+               /*
+                * If we're in a transitional state, just re-queue and return.
+                * The queue will be serviced when we get to a stable state.
+                */
+               if (fnic->state != FNIC_IN_FC_MODE &&
+                   fnic->state != FNIC_IN_ETH_MODE) {
+                       skb_queue_head(&fnic->fip_frame_queue, skb);
+                       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+                       return;
+               }
+               spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+               eh = (struct ethhdr *)skb->data;
+               if (eh->h_proto == htons(ETH_P_FIP)) {
+                       skb_pull(skb, sizeof(*eh));
+                       if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
+                               dev_kfree_skb(skb);
+                               continue;
+                       }
+                       /*
+                        * If there's FLOGI rejects - clear all
+                        * fcf's & restart from scratch
+                        */
+                       if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
+                               shost_printk(KERN_INFO, fnic->lport->host,
+                                         "Trigger a Link down - VLAN Disc\n");
+                               fcoe_ctlr_link_down(&fnic->ctlr);
+                               /* start FCoE VLAN discovery */
+                               fnic_fcoe_send_vlan_req(fnic);
+                               dev_kfree_skb(skb);
+                               continue;
+                       }
+                       fcoe_ctlr_recv(&fnic->ctlr, skb);
+                       continue;
+               }
+       }
+}
+
 /**
  * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
  * @fnic:      fnic instance.
@@ -150,8 +603,14 @@ static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
                skb_reset_mac_header(skb);
        }
        if (eh->h_proto == htons(ETH_P_FIP)) {
-               skb_pull(skb, sizeof(*eh));
-               fcoe_ctlr_recv(&fnic->ctlr, skb);
+               if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) {
+                       printk(KERN_ERR "Dropped FIP frame, as firmware "
+                                       "uses non-FIP mode, Enable FIP "
+                                       "using UCSM\n");
+                       goto drop;
+               }
+               skb_queue_tail(&fnic->fip_frame_queue, skb);
+               queue_work(fnic_fip_queue, &fnic->fip_frame_work);
                return 1;               /* let caller know packet was used */
        }
        if (eh->h_proto != htons(ETH_P_FCOE))
@@ -720,3 +1179,104 @@ void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
        dev_kfree_skb(fp_skb(fp));
        buf->os_buf = NULL;
 }
+
+void fnic_fcoe_reset_vlans(struct fnic *fnic)
+{
+       unsigned long flags;
+       struct fcoe_vlan *vlan;
+       struct fcoe_vlan *next;
+
+       /*
+        * indicate a link down to fcoe so that all fcfs are freed;
+        * this might not be required since we did it before sending the
+        * vlan discovery request
+        */
+       spin_lock_irqsave(&fnic->vlans_lock, flags);
+       if (!list_empty(&fnic->vlans)) {
+               list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {
+                       list_del(&vlan->list);
+                       kfree(vlan);
+               }
+       }
+       spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+}
+
+void fnic_handle_fip_timer(struct fnic *fnic)
+{
+       unsigned long flags;
+       struct fcoe_vlan *vlan;
+       u64 sol_time;
+
+       spin_lock_irqsave(&fnic->fnic_lock, flags);
+       if (fnic->stop_rx_link_events) {
+               spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+               return;
+       }
+       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+       if (fnic->ctlr.mode == FIP_ST_NON_FIP)
+               return;
+
+       spin_lock_irqsave(&fnic->vlans_lock, flags);
+       if (list_empty(&fnic->vlans)) {
+               /* no vlans available, try again */
+               FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+                         "Start VLAN Discovery\n");
+               spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+               fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
+               return;
+       }
+
+       vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
+       shost_printk(KERN_DEBUG, fnic->lport->host,
+                 "fip_timer: vlan %d state %d sol_count %d\n",
+                 vlan->vid, vlan->state, vlan->sol_count);
+       switch (vlan->state) {
+       case FIP_VLAN_USED:
+               FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+                         "FIP VLAN is selected for FC transaction\n");
+               spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+               break;
+       case FIP_VLAN_FAILED:
+               /* if all vlans are in failed state, restart vlan disc */
+               FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
+                         "Start VLAN Discovery\n");
+               spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+               fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
+               break;
+       case FIP_VLAN_SENT:
+               if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
+                       /*
+                        * no response on this vlan, remove it from the list
+                        * and try the next vlan
+                        */
+                       shost_printk(KERN_INFO, fnic->lport->host,
+                                 "Dequeue this VLAN ID %d from list\n",
+                                 vlan->vid);
+                       list_del(&vlan->list);
+                       kfree(vlan);
+                       vlan = NULL;
+                       if (list_empty(&fnic->vlans)) {
+                               /* we exhausted all vlans, restart vlan disc */
+                               spin_unlock_irqrestore(&fnic->vlans_lock,
+                                                       flags);
+                               shost_printk(KERN_INFO, fnic->lport->host,
+                                         "fip_timer: vlan list empty, "
+                                         "trigger vlan disc\n");
+                               fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
+                               return;
+                       }
+                       /* check the next vlan */
+                       vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan,
+                                                       list);
+                       fnic->set_vlan(fnic, vlan->vid);
+                       vlan->state = FIP_VLAN_SENT; /* sent now */
+               }
+               spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+               vlan->sol_count++;
+               sol_time = jiffies + msecs_to_jiffies
+                                       (FCOE_CTLR_START_DELAY);
+               mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
+               break;
+       }
+}
diff --git a/drivers/scsi/fnic/fnic_fip.h b/drivers/scsi/fnic/fnic_fip.h
new file mode 100644 (file)
index 0000000..87e74c2
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _FNIC_FIP_H_
+#define _FNIC_FIP_H_
+
+
+#define FCOE_CTLR_START_DELAY    2000    /* ms after first adv. to choose FCF */
+#define FCOE_CTLR_FIPVLAN_TOV    2000    /* ms after FIP VLAN disc */
+#define FCOE_CTLR_MAX_SOL        8
+
+#define FINC_MAX_FLOGI_REJECTS   8
+
+/*
+ * FIP_DT_VLAN descriptor.
+ */
+struct fip_vlan_desc {
+       struct fip_desc fd_desc;
+       __be16 fd_vlan;
+} __attribute__((packed));
+
+struct vlan {
+       __be16 vid;
+       __be16 type;
+};
+
+/*
+ * VLAN entry.
+ */
+struct fcoe_vlan {
+       struct list_head list;
+       u16 vid;                /* vlan ID */
+       u16 sol_count;          /* no. of sols sent */
+       u16 state;              /* state */
+};
+
+enum fip_vlan_state {
+       FIP_VLAN_AVAIL  = 0,    /* don't do anything */
+       FIP_VLAN_SENT   = 1,    /* sent */
+       FIP_VLAN_USED   = 2,    /* succeed */
+       FIP_VLAN_FAILED = 3,    /* failed to respond */
+};
+
+struct fip_vlan {
+       struct ethhdr eth;
+       struct fip_header fip;
+       struct {
+               struct fip_mac_desc mac;
+               struct fip_wwn_desc wwnn;
+       } desc;
+};
+
+#endif  /* _FNIC_FIP_H_ */
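fnic_fip.h above gives each discovered VLAN a small lifecycle (FIP_VLAN_AVAIL, SENT, USED, FAILED) that fnic_fcs.c drives from the FIP timer and from received FIP advertisements. A hedged userspace sketch of those transitions, with made-up helper names, only to make the state machine explicit:

#include <stdio.h>

enum vlan_state { VLAN_AVAIL, VLAN_SENT, VLAN_USED, VLAN_FAILED };

struct vlan { unsigned short vid; unsigned short sol_count; enum vlan_state state; };

#define MAX_SOL 8       /* mirrors FCOE_CTLR_MAX_SOL */

/* timer tick for the currently selected vlan: solicit until it is used or gives up */
static void vlan_timer_tick(struct vlan *v)
{
        switch (v->state) {
        case VLAN_AVAIL:                        /* discovered but not yet tried */
                v->state = VLAN_SENT;
                v->sol_count = 1;
                break;
        case VLAN_SENT:
                if (++v->sol_count > MAX_SOL)
                        v->state = VLAN_FAILED; /* no FCF answered; try the next vlan or rediscover */
                break;
        case VLAN_USED:                         /* an FCF advertisement arrived on this vlan */
        case VLAN_FAILED:
                break;
        }
}

/* an advertisement on the solicited vlan marks it as in use (fnic_fcoe_vlan_check does this) */
static void vlan_adv_received(struct vlan *v)
{
        if (v->state == VLAN_SENT)
                v->state = VLAN_USED;
}

int main(void)
{
        struct vlan v = { .vid = 100, .state = VLAN_AVAIL };
        vlan_timer_tick(&v);            /* AVAIL -> SENT */
        vlan_adv_received(&v);          /* SENT  -> USED */
        printf("vid %u state %d\n", v.vid, v.state);
        return 0;
}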
index d601ac543c52bd1f24b50a760c240ab2c374f62c..5f09d1814d2685e86b06d9c6d697659d06cade07 100644 (file)
@@ -39,6 +39,7 @@
 #include "vnic_intr.h"
 #include "vnic_stats.h"
 #include "fnic_io.h"
+#include "fnic_fip.h"
 #include "fnic.h"
 
 #define PCI_DEVICE_ID_CISCO_FNIC       0x0045
@@ -292,6 +293,13 @@ static void fnic_notify_timer(unsigned long data)
                  round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD));
 }
 
+static void fnic_fip_notify_timer(unsigned long data)
+{
+       struct fnic *fnic = (struct fnic *)data;
+
+       fnic_handle_fip_timer(fnic);
+}
+
 static void fnic_notify_timer_start(struct fnic *fnic)
 {
        switch (vnic_dev_get_intr_mode(fnic->vdev)) {
@@ -403,6 +411,12 @@ static u8 *fnic_get_mac(struct fc_lport *lport)
        return fnic->data_src_addr;
 }
 
+static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id)
+{
+       u16 old_vlan;
+       old_vlan = vnic_dev_set_default_vlan(fnic->vdev, vlan_id);
+}
+
 static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        struct Scsi_Host *host;
@@ -620,7 +634,29 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0);
                vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS);
                vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
+               fnic->set_vlan = fnic_set_vlan;
                fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO);
+               setup_timer(&fnic->fip_timer, fnic_fip_notify_timer,
+                                                       (unsigned long)fnic);
+               spin_lock_init(&fnic->vlans_lock);
+               INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
+               INIT_WORK(&fnic->event_work, fnic_handle_event);
+               skb_queue_head_init(&fnic->fip_frame_queue);
+               spin_lock_irqsave(&fnic_list_lock, flags);
+               if (!fnic_fip_queue) {
+                       fnic_fip_queue =
+                               create_singlethread_workqueue("fnic_fip_q");
+                       if (!fnic_fip_queue) {
+                               spin_unlock_irqrestore(&fnic_list_lock, flags);
+                               printk(KERN_ERR PFX "fnic FIP work queue "
+                                                "create failed\n");
+                               err = -ENOMEM;
+                               goto err_out_free_max_pool;
+                       }
+               }
+               spin_unlock_irqrestore(&fnic_list_lock, flags);
+               INIT_LIST_HEAD(&fnic->evlist);
+               INIT_LIST_HEAD(&fnic->vlans);
        } else {
                shost_printk(KERN_INFO, fnic->lport->host,
                             "firmware uses non-FIP mode\n");
@@ -807,6 +843,13 @@ static void fnic_remove(struct pci_dev *pdev)
        skb_queue_purge(&fnic->frame_queue);
        skb_queue_purge(&fnic->tx_queue);
 
+       if (fnic->config.flags & VFCF_FIP_CAPABLE) {
+               del_timer_sync(&fnic->fip_timer);
+               skb_queue_purge(&fnic->fip_frame_queue);
+               fnic_fcoe_reset_vlans(fnic);
+               fnic_fcoe_evlist_free(fnic);
+       }
+
        /*
         * Log off the fabric. This stops all remote ports, dns port,
         * logs off the fabric. This flushes all rport, disc, lport work
@@ -889,8 +932,8 @@ static int __init fnic_init_module(void)
        len = sizeof(struct fnic_sgl_list);
        fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create
                ("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
-                SLAB_HWCACHE_ALIGN,
-                NULL);
+                 SLAB_HWCACHE_ALIGN,
+                 NULL);
        if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) {
                printk(KERN_ERR PFX "failed to create fnic max sgl slab\n");
                err = -ENOMEM;
@@ -951,6 +994,10 @@ static void __exit fnic_cleanup_module(void)
 {
        pci_unregister_driver(&fnic_driver);
        destroy_workqueue(fnic_event_queue);
+       if (fnic_fip_queue) {
+               flush_workqueue(fnic_fip_queue);
+               destroy_workqueue(fnic_fip_queue);
+       }
        kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
        kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
        kmem_cache_destroy(fnic_io_req_cache);
index b576be734e2e04a342cd031f75c146096bc1c2f5..9795d6f3e1974e47e98f6d28ef80f1e4f282e528 100644 (file)
@@ -584,6 +584,16 @@ int vnic_dev_init(struct vnic_dev *vdev, int arg)
        return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
 }
 
+u16 vnic_dev_set_default_vlan(struct vnic_dev *vdev, u16 new_default_vlan)
+{
+       u64 a0 = new_default_vlan, a1 = 0;
+       int wait = 1000;
+       int old_vlan = 0;
+
+       old_vlan = vnic_dev_cmd(vdev, CMD_SET_DEFAULT_VLAN, &a0, &a1, wait);
+       return (u16)old_vlan;
+}
+
 int vnic_dev_link_status(struct vnic_dev *vdev)
 {
        if (vdev->linkstatus)
index f9935a8a5a0932e14b4f896627ebe59567f8a1d9..40d4195f562bad4f40d406d3e28e48a018b6c213 100644 (file)
@@ -148,6 +148,8 @@ int vnic_dev_disable(struct vnic_dev *vdev);
 int vnic_dev_open(struct vnic_dev *vdev, int arg);
 int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
 int vnic_dev_init(struct vnic_dev *vdev, int arg);
+u16 vnic_dev_set_default_vlan(struct vnic_dev *vdev,
+                               u16 new_default_vlan);
 int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
 int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
 void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
index 7c9ccbd4134b722dad20643314fa719fdcffb89c..3e2fcbda6aedad54315aac57e07664df171306db 100644 (file)
@@ -196,6 +196,73 @@ enum vnic_devcmd_cmd {
 
        /* undo initialize of virtual link */
        CMD_DEINIT              = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34),
+
+       /* check fw capability of a cmd:
+        * in:  (u32)a0=cmd
+        * out: (u32)a0=errno, 0:valid cmd, a1=supported VNIC_STF_* bits */
+       CMD_CAPABILITY      = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 36),
+
+       /* persistent binding info
+        * in:  (u64)a0=paddr of arg
+        *      (u32)a1=CMD_PERBI_XXX */
+       CMD_PERBI       = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_FC, 37),
+
+       /* Interrupt Assert Register functionality
+        * in: (u16)a0=interrupt number to assert
+        */
+       CMD_IAR         = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 38),
+
+       /* initiate hangreset, like softreset after hang detected */
+       CMD_HANG_RESET      = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 39),
+
+       /* hangreset status:
+        *    out: a0=0 reset complete, a0=1 reset in progress */
+       CMD_HANG_RESET_STATUS   = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 40),
+
+       /*
+        * Set hw ingress packet vlan rewrite mode:
+        * in:  (u32)a0=new vlan rewrite mode
+        * out: (u32)a0=old vlan rewrite mode */
+       CMD_IG_VLAN_REWRITE_MODE = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 41),
+
+       /*
+        * in:  (u16)a0=bdf of target vnic
+        *      (u32)a1=cmd to proxy
+        *      a2-a15=args to cmd in a1
+        * out: (u32)a0=status of proxied cmd
+        *      a1-a15=out args of proxied cmd */
+       CMD_PROXY_BY_BDF =  _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 42),
+
+       /*
+        * As for BY_BDF except a0 is index of hvnlink subordinate vnic
+        * or SR-IOV virtual vnic
+        */
+       CMD_PROXY_BY_INDEX =    _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 43),
+
+       /*
+        * For HPP toggle:
+        * adapter-info-get
+        * in:  (u64)a0=physical address of buffer passed in from caller.
+        *      (u16)a1=size of buffer specified in a0.
+        * out: (u64)a0=physical address of buffer passed in from caller.
+        *      (u16)a1=actual bytes from VIF-CONFIG-INFO TLV, or
+        *              0 if no VIF-CONFIG-INFO TLV was ever received. */
+       CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44),
+
+       /*
+        * INT13 API: (u64)a0=paddr to vnic_int13_params struct
+        *            (u32)a1=INT13_CMD_xxx
+        */
+       CMD_INT13_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 45),
+
+       /*
+        * Set default vlan:
+        * in: (u16)a0=new default vlan
+        *     (u16)a1=zero for overriding vlan with param a0,
+        *             non-zero for resetting vlan to the default
+        * out: (u16)a0=old default vlan
+        */
+       CMD_SET_DEFAULT_VLAN = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 46)
 };
 
 /* flags for CMD_OPEN */
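CMD_SET_DEFAULT_VLAN above returns the previous default VLAN, which is what makes vnic_dev_set_default_vlan() (added to vnic_dev.c earlier in this diff) behave as a swap. A hedged sketch of a caller-side save/restore built only on that exported function; the wrapper itself is illustrative and assumes the usual fnic vnic headers:

/* illustrative save/restore around the new devcmd wrapper */
struct vlan_save {
        u16 old_vlan;
};

static void example_override_vlan(struct vnic_dev *vdev, struct vlan_save *s, u16 vid)
{
        /* the return value is the VLAN that was the default before this call */
        s->old_vlan = vnic_dev_set_default_vlan(vdev, vid);
}

static void example_restore_vlan(struct vnic_dev *vdev, struct vlan_save *s)
{
        vnic_dev_set_default_vlan(vdev, s->old_vlan);
}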
index cc82d0f322b6cb30fd3fdaf1fd5e49665302dece..4e31caa21ddfba379036b6a7b4c085a5f916549f 100644 (file)
@@ -2179,7 +2179,7 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
                return 0;
        }
 
-       if (vhost->state == IBMVFC_ACTIVE) {
+       if (vhost->logged_in) {
                evt = ibmvfc_get_event(vhost);
                ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
 
@@ -2190,7 +2190,12 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
                tmf->common.length = sizeof(*tmf);
                tmf->scsi_id = rport->port_id;
                int_to_scsilun(sdev->lun, &tmf->lun);
-               tmf->flags = (type | IBMVFC_TMF_LUA_VALID);
+               if (!(vhost->login_buf->resp.capabilities & IBMVFC_CAN_SUPPRESS_ABTS))
+                       type &= ~IBMVFC_TMF_SUPPRESS_ABTS;
+               if (vhost->state == IBMVFC_ACTIVE)
+                       tmf->flags = (type | IBMVFC_TMF_LUA_VALID);
+               else
+                       tmf->flags = ((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID);
                tmf->cancel_key = (unsigned long)sdev->hostdata;
                tmf->my_cancel_key = (unsigned long)starget->hostdata;
 
@@ -2327,7 +2332,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
        timeout = wait_for_completion_timeout(&evt->comp, timeout);
 
        if (!timeout) {
-               rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
+               rc = ibmvfc_cancel_all(sdev, 0);
                if (!rc) {
                        rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
                        if (rc == SUCCESS)
@@ -2383,24 +2388,30 @@ out:
  * @cmd:       scsi command to abort
  *
  * Returns:
- *     SUCCESS / FAILED
+ *     SUCCESS / FAST_IO_FAIL / FAILED
  **/
 static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
 {
        struct scsi_device *sdev = cmd->device;
        struct ibmvfc_host *vhost = shost_priv(sdev->host);
-       int cancel_rc, abort_rc;
+       int cancel_rc, block_rc;
        int rc = FAILED;
 
        ENTER;
-       fc_block_scsi_eh(cmd);
+       block_rc = fc_block_scsi_eh(cmd);
        ibmvfc_wait_while_resetting(vhost);
-       cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
-       abort_rc = ibmvfc_abort_task_set(sdev);
+       if (block_rc != FAST_IO_FAIL) {
+               cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
+               ibmvfc_abort_task_set(sdev);
+       } else
+               cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
 
-       if (!cancel_rc && !abort_rc)
+       if (!cancel_rc)
                rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
 
+       if (block_rc == FAST_IO_FAIL && rc != FAILED)
+               rc = FAST_IO_FAIL;
+
        LEAVE;
        return rc;
 }
@@ -2410,28 +2421,46 @@ static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
  * @cmd:       scsi command struct
  *
  * Returns:
- *     SUCCESS / FAILED
+ *     SUCCESS / FAST_IO_FAIL / FAILED
  **/
 static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
 {
        struct scsi_device *sdev = cmd->device;
        struct ibmvfc_host *vhost = shost_priv(sdev->host);
-       int cancel_rc, reset_rc;
+       int cancel_rc, block_rc, reset_rc = 0;
        int rc = FAILED;
 
        ENTER;
-       fc_block_scsi_eh(cmd);
+       block_rc = fc_block_scsi_eh(cmd);
        ibmvfc_wait_while_resetting(vhost);
-       cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
-       reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
+       if (block_rc != FAST_IO_FAIL) {
+               cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
+               reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
+       } else
+               cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
 
        if (!cancel_rc && !reset_rc)
                rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
 
+       if (block_rc == FAST_IO_FAIL && rc != FAILED)
+               rc = FAST_IO_FAIL;
+
        LEAVE;
        return rc;
 }
 
+/**
+ * ibmvfc_dev_cancel_all_noreset - Device iterated cancel all function
+ * @sdev:      scsi device struct
+ * @data:      return code
+ *
+ **/
+static void ibmvfc_dev_cancel_all_noreset(struct scsi_device *sdev, void *data)
+{
+       unsigned long *rc = data;
+       *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
+}
+
 /**
  * ibmvfc_dev_cancel_all_reset - Device iterated cancel all function
  * @sdev:      scsi device struct
@@ -2449,26 +2478,33 @@ static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data)
  * @cmd:       scsi command struct
  *
  * Returns:
- *     SUCCESS / FAILED
+ *     SUCCESS / FAST_IO_FAIL / FAILED
  **/
 static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
 {
        struct scsi_device *sdev = cmd->device;
        struct ibmvfc_host *vhost = shost_priv(sdev->host);
        struct scsi_target *starget = scsi_target(sdev);
-       int reset_rc;
+       int block_rc;
+       int reset_rc = 0;
        int rc = FAILED;
        unsigned long cancel_rc = 0;
 
        ENTER;
-       fc_block_scsi_eh(cmd);
+       block_rc = fc_block_scsi_eh(cmd);
        ibmvfc_wait_while_resetting(vhost);
-       starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
-       reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
+       if (block_rc != FAST_IO_FAIL) {
+               starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
+               reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
+       } else
+               starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_noreset);
 
        if (!cancel_rc && !reset_rc)
                rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
 
+       if (block_rc == FAST_IO_FAIL && rc != FAILED)
+               rc = FAST_IO_FAIL;
+
        LEAVE;
        return rc;
 }
@@ -2480,12 +2516,16 @@ static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
  **/
 static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
 {
-       int rc;
+       int rc, block_rc;
        struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
 
-       fc_block_scsi_eh(cmd);
+       block_rc = fc_block_scsi_eh(cmd);
        dev_err(vhost->dev, "Resetting connection due to error recovery\n");
        rc = ibmvfc_issue_fc_host_lip(vhost->host);
+
+       if (block_rc == FAST_IO_FAIL)
+               return FAST_IO_FAIL;
+
        return rc ? FAILED : SUCCESS;
 }
 
@@ -2509,8 +2549,7 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
                dev_rport = starget_to_rport(scsi_target(sdev));
                if (dev_rport != rport)
                        continue;
-               ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
-               ibmvfc_abort_task_set(sdev);
+               ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
        }
 
        rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport);
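The ibmvfc error-handler changes above all follow one shape: capture the return value of fc_block_scsi_eh(), take a lighter cancel path (suppressing ABTS) when it reports FAST_IO_FAIL, and propagate FAST_IO_FAIL instead of SUCCESS to the SCSI midlayer. A stripped-down sketch of that shape; do_cancel(), do_cancel_suppress_abts(), do_reset() and wait_for_outstanding_ops() are placeholders, not ibmvfc functions:

static int example_eh_device_reset(struct scsi_cmnd *cmd)
{
        int rc = FAILED;
        int cancel_rc, reset_rc = 0;
        int block_rc = fc_block_scsi_eh(cmd);   /* 0, or FAST_IO_FAIL after fast_io_fail_tmo */

        if (block_rc != FAST_IO_FAIL) {
                cancel_rc = do_cancel(cmd->device);                     /* full cancel + reset */
                reset_rc = do_reset(cmd->device);
        } else {
                cancel_rc = do_cancel_suppress_abts(cmd->device);       /* no ABTS on the wire */
        }

        if (!cancel_rc && !reset_rc)
                rc = wait_for_outstanding_ops(cmd->device);

        if (block_rc == FAST_IO_FAIL && rc != FAILED)
                rc = FAST_IO_FAIL;      /* tell SCSI EH to fail the I/O fast rather than retry */

        return rc;
}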
index 3be8af624e6fce888ab2b148c66321a2ce30558c..017a5290e8c12ba5d27c89a390c4ed6490e05000 100644 (file)
@@ -29,8 +29,8 @@
 #include "viosrp.h"
 
 #define IBMVFC_NAME    "ibmvfc"
-#define IBMVFC_DRIVER_VERSION          "1.0.10"
-#define IBMVFC_DRIVER_DATE             "(August 24, 2012)"
+#define IBMVFC_DRIVER_VERSION          "1.0.11"
+#define IBMVFC_DRIVER_DATE             "(April 12, 2013)"
 
 #define IBMVFC_DEFAULT_TIMEOUT 60
 #define IBMVFC_ADISC_CANCEL_TIMEOUT    45
@@ -208,10 +208,10 @@ struct ibmvfc_npiv_login_resp {
        u16 error;
        u32 flags;
 #define IBMVFC_NATIVE_FC               0x01
-#define IBMVFC_CAN_FLUSH_ON_HALT       0x08
        u32 reserved;
        u64 capabilities;
 #define IBMVFC_CAN_FLUSH_ON_HALT       0x08
+#define IBMVFC_CAN_SUPPRESS_ABTS       0x10
        u32 max_cmds;
        u32 scsi_id_sz;
        u64 max_dma_len;
@@ -351,6 +351,7 @@ struct ibmvfc_tmf {
 #define IBMVFC_TMF_LUN_RESET           0x10
 #define IBMVFC_TMF_TGT_RESET           0x20
 #define IBMVFC_TMF_LUA_VALID           0x40
+#define IBMVFC_TMF_SUPPRESS_ABTS       0x80
        u32 cancel_key;
        u32 my_cancel_key;
        u32 pad;
index 2197b57fb2251f541e0bfc0eab626ce670b66504..82a3c1ec8706600473c4daedd33b248d2c94518f 100644 (file)
@@ -4777,7 +4777,7 @@ static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
        ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 
-       if (!ioa_cfg->in_reset_reload) {
+       if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
                ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
                dev_err(&ioa_cfg->pdev->dev,
                        "Adapter being reset as a result of error recovery.\n");
@@ -6421,7 +6421,7 @@ static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
 {
        u32 ioadl_flags = 0;
        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
-       struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
+       struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
        struct ipr_ioadl64_desc *last_ioadl64 = NULL;
        int len = qc->nbytes;
        struct scatterlist *sg;
@@ -6441,7 +6441,7 @@ static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
        ioarcb->ioadl_len =
                cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
        ioarcb->u.sis64_addr_data.data_ioadl_addr =
-               cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
+               cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
 
        for_each_sg(qc->sg, sg, qc->n_elem, si) {
                ioadl64->flags = cpu_to_be32(ioadl_flags);
@@ -6739,6 +6739,7 @@ static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
 {
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+       int i;
 
        ENTER;
        if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
@@ -6750,6 +6751,13 @@ static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
 
        ioa_cfg->in_reset_reload = 0;
        ioa_cfg->reset_retries = 0;
+       for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+               spin_lock(&ioa_cfg->hrrq[i]._lock);
+               ioa_cfg->hrrq[i].ioa_is_dead = 1;
+               spin_unlock(&ioa_cfg->hrrq[i]._lock);
+       }
+       wmb();
+
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
        wake_up_all(&ioa_cfg->reset_wait_q);
        LEAVE;
@@ -8651,7 +8659,7 @@ static void ipr_pci_perm_failure(struct pci_dev *pdev)
        spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
        if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
                ioa_cfg->sdt_state = ABORT_DUMP;
-       ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
+       ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
        ioa_cfg->in_ioa_bringdown = 1;
        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
                spin_lock(&ioa_cfg->hrrq[i]._lock);
index 21a6ff1ed5c6b1afb49c38d191372bc59533780d..a1fb840596ef08cb129c8301dd81a34d24c73fd5 100644 (file)
@@ -552,7 +552,7 @@ struct ipr_ioarcb_ata_regs {        /* 22 bytes */
        u8 hob_lbam;
        u8 hob_lbah;
        u8 ctl;
-}__attribute__ ((packed, aligned(4)));
+}__attribute__ ((packed, aligned(2)));
 
 struct ipr_ioadl_desc {
        __be32 flags_and_data_len;
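The ipr.h hunk relaxes the 22-byte ATA register block from aligned(4) to aligned(2), which changes where the block may start inside an enclosing structure and how much tail padding it carries. A small self-contained illustration of how packed plus aligned() interact (the struct names here are made up):

#include <stdio.h>
#include <stddef.h>

struct regs2 { unsigned char r[22]; } __attribute__((packed, aligned(2)));
struct regs4 { unsigned char r[22]; } __attribute__((packed, aligned(4)));

struct holder2 { unsigned char flag; struct regs2 regs; };
struct holder4 { unsigned char flag; struct regs4 regs; };

int main(void)
{
        /* aligned(2) lets the block start at offset 2 and keeps sizeof at 22;
         * aligned(4) forces offset 4 and pads the block out to 24 bytes */
        printf("aligned(2): offset %zu, size %zu\n",
               offsetof(struct holder2, regs), sizeof(struct regs2));
        printf("aligned(4): offset %zu, size %zu\n",
               offsetof(struct holder4, regs), sizeof(struct regs4));
        return 0;
}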
index c3aa6c5457b9c386e7237db6201dfb1f8603b83c..96a26f454673cf8b9cad6abd38ee9a28104c6a95 100644 (file)
@@ -1085,7 +1085,7 @@ static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *s
        struct isci_host *ihost = idev->owning_port->owning_controller;
        struct domain_device *dev = idev->domain_dev;
 
-       if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
+       if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
                sci_change_state(&idev->sm, SCI_STP_DEV_IDLE);
        } else if (dev_is_expander(dev)) {
                sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE);
@@ -1098,7 +1098,7 @@ static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm
        struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
        struct domain_device *dev = idev->domain_dev;
 
-       if (dev->dev_type == SAS_END_DEV) {
+       if (dev->dev_type == SAS_END_DEVICE) {
                struct isci_host *ihost = idev->owning_port->owning_controller;
 
                isci_remote_device_not_ready(ihost, idev,
index 7674caae1d88bac82520c65b0cd6004e19db626f..47a013fffae73c1d416b68fc6b81dc5274bf2c46 100644 (file)
@@ -297,7 +297,7 @@ static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_conte
 
 static inline bool dev_is_expander(struct domain_device *dev)
 {
-       return dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV;
+       return dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE;
 }
 
 static inline void sci_remote_device_decrement_request_count(struct isci_remote_device *idev)
index 9594ab62702b1786d51d7ac80f7e10b365b6ce61..e3e3bcbd5a9fcf4245bf9d28b53c0274ca642c2d 100644 (file)
@@ -2978,7 +2978,7 @@ static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
        /* all unaccelerated request types (non ssp or ncq) handled with
         * substates
         */
-       if (!task && dev->dev_type == SAS_END_DEV) {
+       if (!task && dev->dev_type == SAS_END_DEVICE) {
                state = SCI_REQ_TASK_WAIT_TC_COMP;
        } else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
                state = SCI_REQ_SMP_WAIT_RESP;
@@ -3101,7 +3101,7 @@ sci_io_request_construct(struct isci_host *ihost,
        if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
                return SCI_FAILURE_INVALID_REMOTE_DEVICE;
 
-       if (dev->dev_type == SAS_END_DEV)
+       if (dev->dev_type == SAS_END_DEVICE)
                /* pass */;
        else if (dev_is_sata(dev))
                memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
@@ -3125,7 +3125,7 @@ enum sci_status sci_task_request_construct(struct isci_host *ihost,
        /* Build the common part of the request */
        sci_general_request_construct(ihost, idev, ireq);
 
-       if (dev->dev_type == SAS_END_DEV || dev_is_sata(dev)) {
+       if (dev->dev_type == SAS_END_DEVICE || dev_is_sata(dev)) {
                set_bit(IREQ_TMF, &ireq->flags);
                memset(ireq->tc, 0, sizeof(struct scu_task_context));
 
index b6f19a1db780f7303abc9796c19553b058c89ada..9bb020ac089cddf6b665e8a52f728c95621ece34 100644 (file)
@@ -250,7 +250,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost,
        }
 
        /* XXX convert to get this from task->tproto like other drivers */
-       if (dev->dev_type == SAS_END_DEV) {
+       if (dev->dev_type == SAS_END_DEVICE) {
                isci_tmf->proto = SAS_PROTOCOL_SSP;
                status = sci_task_request_construct_ssp(ireq);
                if (status != SCI_SUCCESS)
index bdb81cda84013f0d132aa1972bd409dcb9903f6b..161c98efade9b9f290c04588e4638df0f3c421ac 100644 (file)
@@ -285,14 +285,14 @@ int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy)
        if (phy->attached_tproto & SAS_PROTOCOL_STP)
                dev->tproto = phy->attached_tproto;
        if (phy->attached_sata_dev)
-               dev->tproto |= SATA_DEV;
+               dev->tproto |= SAS_SATA_DEV;
 
-       if (phy->attached_dev_type == SATA_PENDING)
-               dev->dev_type = SATA_PENDING;
+       if (phy->attached_dev_type == SAS_SATA_PENDING)
+               dev->dev_type = SAS_SATA_PENDING;
        else {
                int res;
 
-               dev->dev_type = SATA_DEV;
+               dev->dev_type = SAS_SATA_DEV;
                res = sas_get_report_phy_sata(dev->parent, phy->phy_id,
                                              &dev->sata_dev.rps_resp);
                if (res) {
@@ -314,7 +314,7 @@ static int sas_ata_clear_pending(struct domain_device *dev, struct ex_phy *phy)
        int res;
 
        /* we weren't pending, so successfully end the reset sequence now */
-       if (dev->dev_type != SATA_PENDING)
+       if (dev->dev_type != SAS_SATA_PENDING)
                return 1;
 
        /* hmmm, if this succeeds do we need to repost the domain_device to the
@@ -348,9 +348,9 @@ static int smp_ata_check_ready(struct ata_link *link)
                return 0;
 
        switch (ex_phy->attached_dev_type) {
-       case SATA_PENDING:
+       case SAS_SATA_PENDING:
                return 0;
-       case SAS_END_DEV:
+       case SAS_END_DEVICE:
                if (ex_phy->attached_sata_dev)
                        return sas_ata_clear_pending(dev, ex_phy);
        default:
@@ -631,7 +631,7 @@ static void sas_get_ata_command_set(struct domain_device *dev)
        struct dev_to_host_fis *fis =
                (struct dev_to_host_fis *) dev->frame_rcvd;
 
-       if (dev->dev_type == SATA_PENDING)
+       if (dev->dev_type == SAS_SATA_PENDING)
                return;
 
        if ((fis->sector_count == 1 && /* ATA */
@@ -797,7 +797,7 @@ int sas_discover_sata(struct domain_device *dev)
 {
        int res;
 
-       if (dev->dev_type == SATA_PM)
+       if (dev->dev_type == SAS_SATA_PM)
                return -ENODEV;
 
        sas_get_ata_command_set(dev);
index a0c3003e0c7d2f6b5e1a89147ab29d58b1609aaa..62b58d38ce2e63beda6d1ba4d9876c9af2e51008 100644 (file)
 void sas_init_dev(struct domain_device *dev)
 {
        switch (dev->dev_type) {
-       case SAS_END_DEV:
+       case SAS_END_DEVICE:
                INIT_LIST_HEAD(&dev->ssp_dev.eh_list_node);
                break;
-       case EDGE_DEV:
-       case FANOUT_DEV:
+       case SAS_EDGE_EXPANDER_DEVICE:
+       case SAS_FANOUT_EXPANDER_DEVICE:
                INIT_LIST_HEAD(&dev->ex_dev.children);
                mutex_init(&dev->ex_dev.cmd_mutex);
                break;
@@ -93,9 +93,9 @@ static int sas_get_port_device(struct asd_sas_port *port)
                if (fis->interrupt_reason == 1 && fis->lbal == 1 &&
                    fis->byte_count_low==0x69 && fis->byte_count_high == 0x96
                    && (fis->device & ~0x10) == 0)
-                       dev->dev_type = SATA_PM;
+                       dev->dev_type = SAS_SATA_PM;
                else
-                       dev->dev_type = SATA_DEV;
+                       dev->dev_type = SAS_SATA_DEV;
                dev->tproto = SAS_PROTOCOL_SATA;
        } else {
                struct sas_identify_frame *id =
@@ -109,21 +109,21 @@ static int sas_get_port_device(struct asd_sas_port *port)
 
        dev->port = port;
        switch (dev->dev_type) {
-       case SATA_DEV:
+       case SAS_SATA_DEV:
                rc = sas_ata_init(dev);
                if (rc) {
                        rphy = NULL;
                        break;
                }
                /* fall through */
-       case SAS_END_DEV:
+       case SAS_END_DEVICE:
                rphy = sas_end_device_alloc(port->port);
                break;
-       case EDGE_DEV:
+       case SAS_EDGE_EXPANDER_DEVICE:
                rphy = sas_expander_alloc(port->port,
                                          SAS_EDGE_EXPANDER_DEVICE);
                break;
-       case FANOUT_DEV:
+       case SAS_FANOUT_EXPANDER_DEVICE:
                rphy = sas_expander_alloc(port->port,
                                          SAS_FANOUT_EXPANDER_DEVICE);
                break;
@@ -156,7 +156,7 @@ static int sas_get_port_device(struct asd_sas_port *port)
        dev->rphy = rphy;
        get_device(&dev->rphy->dev);
 
-       if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEV)
+       if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEVICE)
                list_add_tail(&dev->disco_list_node, &port->disco_list);
        else {
                spin_lock_irq(&port->dev_list_lock);
@@ -315,7 +315,7 @@ void sas_free_device(struct kref *kref)
        dev->phy = NULL;
 
        /* remove the phys and ports, everything else should be gone */
-       if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV)
+       if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
                kfree(dev->ex_dev.ex_phy);
 
        if (dev_is_sata(dev) && dev->sata_dev.ap) {
@@ -343,7 +343,7 @@ static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_d
        spin_unlock_irq(&port->dev_list_lock);
 
        spin_lock_irq(&ha->lock);
-       if (dev->dev_type == SAS_END_DEV &&
+       if (dev->dev_type == SAS_END_DEVICE &&
            !list_empty(&dev->ssp_dev.eh_list_node)) {
                list_del_init(&dev->ssp_dev.eh_list_node);
                ha->eh_active--;
@@ -457,15 +457,15 @@ static void sas_discover_domain(struct work_struct *work)
                    task_pid_nr(current));
 
        switch (dev->dev_type) {
-       case SAS_END_DEV:
+       case SAS_END_DEVICE:
                error = sas_discover_end_dev(dev);
                break;
-       case EDGE_DEV:
-       case FANOUT_DEV:
+       case SAS_EDGE_EXPANDER_DEVICE:
+       case SAS_FANOUT_EXPANDER_DEVICE:
                error = sas_discover_root_expander(dev);
                break;
-       case SATA_DEV:
-       case SATA_PM:
+       case SAS_SATA_DEV:
+       case SAS_SATA_PM:
 #ifdef CONFIG_SCSI_SAS_ATA
                error = sas_discover_sata(dev);
                break;
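
Note: the hunks above replace libsas's private device-type constants (SAS_END_DEV, EDGE_DEV, FANOUT_DEV, SATA_DEV, ...) with the transport-class names from enum sas_device_type, so discovery code and the SAS transport class switch over a single set of values. A minimal, self-contained sketch of that single-enum pattern follows; the numeric values are assumptions for illustration only, not copied from the kernel headers.

#include <stdio.h>

/* Illustrative enum in the style of the renamed constants; the numeric
 * values here are assumptions for this sketch. */
enum sas_device_type {
	SAS_PHY_UNUSED = 0,
	SAS_END_DEVICE = 1,
	SAS_EDGE_EXPANDER_DEVICE = 2,
	SAS_FANOUT_EXPANDER_DEVICE = 3,
	SAS_SATA_DEV = 5,
	SAS_SATA_PM = 7,
	SAS_SATA_PENDING = 9,
};

/* Same shape as the switch statements in the hunks above: one enum,
 * one set of names, shared by every caller. */
static const char *sas_device_type_name(enum sas_device_type t)
{
	switch (t) {
	case SAS_PHY_UNUSED:			return "no device";
	case SAS_END_DEVICE:			return "end device";
	case SAS_EDGE_EXPANDER_DEVICE:		return "edge expander";
	case SAS_FANOUT_EXPANDER_DEVICE:	return "fanout expander";
	case SAS_SATA_DEV:			return "sata device";
	case SAS_SATA_PM:			return "sata port multiplier";
	case SAS_SATA_PENDING:			return "sata pending";
	}
	return "unknown";
}

int main(void)
{
	printf("%s\n", sas_device_type_name(SAS_EDGE_EXPANDER_DEVICE));
	return 0;
}
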
index f42b0e15410f8a52e0a570664b04547d459e7aec..446b85110a1fc0a69b07e42bb3ecc7144d76d1ce 100644 (file)
@@ -183,21 +183,21 @@ static char sas_route_char(struct domain_device *dev, struct ex_phy *phy)
        }
 }
 
-static enum sas_dev_type to_dev_type(struct discover_resp *dr)
+static enum sas_device_type to_dev_type(struct discover_resp *dr)
 {
        /* This is detecting a failure to transmit initial dev to host
         * FIS as described in section J.5 of sas-2 r16
         */
-       if (dr->attached_dev_type == NO_DEVICE && dr->attached_sata_dev &&
+       if (dr->attached_dev_type == SAS_PHY_UNUSED && dr->attached_sata_dev &&
            dr->linkrate >= SAS_LINK_RATE_1_5_GBPS)
-               return SATA_PENDING;
+               return SAS_SATA_PENDING;
        else
                return dr->attached_dev_type;
 }
 
 static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
 {
-       enum sas_dev_type dev_type;
+       enum sas_device_type dev_type;
        enum sas_linkrate linkrate;
        u8 sas_addr[SAS_ADDR_SIZE];
        struct smp_resp *resp = rsp;
@@ -238,7 +238,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
        /* Handle vacant phy - rest of dr data is not valid so skip it */
        if (phy->phy_state == PHY_VACANT) {
                memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
-               phy->attached_dev_type = NO_DEVICE;
+               phy->attached_dev_type = SAS_PHY_UNUSED;
                if (!test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) {
                        phy->phy_id = phy_id;
                        goto skip;
@@ -259,7 +259,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
        /* help some expanders that fail to zero sas_address in the 'no
         * device' case
         */
-       if (phy->attached_dev_type == NO_DEVICE ||
+       if (phy->attached_dev_type == SAS_PHY_UNUSED ||
            phy->linkrate < SAS_LINK_RATE_1_5_GBPS)
                memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
        else
@@ -292,13 +292,13 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
 
  out:
        switch (phy->attached_dev_type) {
-       case SATA_PENDING:
+       case SAS_SATA_PENDING:
                type = "stp pending";
                break;
-       case NO_DEVICE:
+       case SAS_PHY_UNUSED:
                type = "no device";
                break;
-       case SAS_END_DEV:
+       case SAS_END_DEVICE:
                if (phy->attached_iproto) {
                        if (phy->attached_tproto)
                                type = "host+target";
@@ -311,8 +311,8 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
                                type = "ssp";
                }
                break;
-       case EDGE_DEV:
-       case FANOUT_DEV:
+       case SAS_EDGE_EXPANDER_DEVICE:
+       case SAS_FANOUT_EXPANDER_DEVICE:
                type = "smp";
                break;
        default:
@@ -833,7 +833,7 @@ static struct domain_device *sas_ex_discover_end_dev(
        } else
 #endif
          if (phy->attached_tproto & SAS_PROTOCOL_SSP) {
-               child->dev_type = SAS_END_DEV;
+               child->dev_type = SAS_END_DEVICE;
                rphy = sas_end_device_alloc(phy->port);
                /* FIXME: error handling */
                if (unlikely(!rphy))
@@ -932,11 +932,11 @@ static struct domain_device *sas_ex_discover_expander(
 
 
        switch (phy->attached_dev_type) {
-       case EDGE_DEV:
+       case SAS_EDGE_EXPANDER_DEVICE:
                rphy = sas_expander_alloc(phy->port,
                                          SAS_EDGE_EXPANDER_DEVICE);
                break;
-       case FANOUT_DEV:
+       case SAS_FANOUT_EXPANDER_DEVICE:
                rphy = sas_expander_alloc(phy->port,
                                          SAS_FANOUT_EXPANDER_DEVICE);
                break;
@@ -1013,7 +1013,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
        if (sas_dev_present_in_domain(dev->port, ex_phy->attached_sas_addr))
                sas_ex_disable_port(dev, ex_phy->attached_sas_addr);
 
-       if (ex_phy->attached_dev_type == NO_DEVICE) {
+       if (ex_phy->attached_dev_type == SAS_PHY_UNUSED) {
                if (ex_phy->routing_attr == DIRECT_ROUTING) {
                        memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
                        sas_configure_routing(dev, ex_phy->attached_sas_addr);
@@ -1022,10 +1022,10 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
        } else if (ex_phy->linkrate == SAS_LINK_RATE_UNKNOWN)
                return 0;
 
-       if (ex_phy->attached_dev_type != SAS_END_DEV &&
-           ex_phy->attached_dev_type != FANOUT_DEV &&
-           ex_phy->attached_dev_type != EDGE_DEV &&
-           ex_phy->attached_dev_type != SATA_PENDING) {
+       if (ex_phy->attached_dev_type != SAS_END_DEVICE &&
+           ex_phy->attached_dev_type != SAS_FANOUT_EXPANDER_DEVICE &&
+           ex_phy->attached_dev_type != SAS_EDGE_EXPANDER_DEVICE &&
+           ex_phy->attached_dev_type != SAS_SATA_PENDING) {
                SAS_DPRINTK("unknown device type(0x%x) attached to ex %016llx "
                            "phy 0x%x\n", ex_phy->attached_dev_type,
                            SAS_ADDR(dev->sas_addr),
@@ -1049,11 +1049,11 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
        }
 
        switch (ex_phy->attached_dev_type) {
-       case SAS_END_DEV:
-       case SATA_PENDING:
+       case SAS_END_DEVICE:
+       case SAS_SATA_PENDING:
                child = sas_ex_discover_end_dev(dev, phy_id);
                break;
-       case FANOUT_DEV:
+       case SAS_FANOUT_EXPANDER_DEVICE:
                if (SAS_ADDR(dev->port->disc.fanout_sas_addr)) {
                        SAS_DPRINTK("second fanout expander %016llx phy 0x%x "
                                    "attached to ex %016llx phy 0x%x\n",
@@ -1067,7 +1067,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
                        memcpy(dev->port->disc.fanout_sas_addr,
                               ex_phy->attached_sas_addr, SAS_ADDR_SIZE);
                /* fallthrough */
-       case EDGE_DEV:
+       case SAS_EDGE_EXPANDER_DEVICE:
                child = sas_ex_discover_expander(dev, phy_id);
                break;
        default:
@@ -1111,8 +1111,8 @@ static int sas_find_sub_addr(struct domain_device *dev, u8 *sub_addr)
                    phy->phy_state == PHY_NOT_PRESENT)
                        continue;
 
-               if ((phy->attached_dev_type == EDGE_DEV ||
-                    phy->attached_dev_type == FANOUT_DEV) &&
+               if ((phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+                    phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE) &&
                    phy->routing_attr == SUBTRACTIVE_ROUTING) {
 
                        memcpy(sub_addr, phy->attached_sas_addr,SAS_ADDR_SIZE);
@@ -1130,8 +1130,8 @@ static int sas_check_level_subtractive_boundary(struct domain_device *dev)
        u8 sub_addr[8] = {0, };
 
        list_for_each_entry(child, &ex->children, siblings) {
-               if (child->dev_type != EDGE_DEV &&
-                   child->dev_type != FANOUT_DEV)
+               if (child->dev_type != SAS_EDGE_EXPANDER_DEVICE &&
+                   child->dev_type != SAS_FANOUT_EXPANDER_DEVICE)
                        continue;
                if (sub_addr[0] == 0) {
                        sas_find_sub_addr(child, sub_addr);
@@ -1208,7 +1208,7 @@ static int sas_check_ex_subtractive_boundary(struct domain_device *dev)
        int i;
        u8  *sub_sas_addr = NULL;
 
-       if (dev->dev_type != EDGE_DEV)
+       if (dev->dev_type != SAS_EDGE_EXPANDER_DEVICE)
                return 0;
 
        for (i = 0; i < ex->num_phys; i++) {
@@ -1218,8 +1218,8 @@ static int sas_check_ex_subtractive_boundary(struct domain_device *dev)
                    phy->phy_state == PHY_NOT_PRESENT)
                        continue;
 
-               if ((phy->attached_dev_type == FANOUT_DEV ||
-                    phy->attached_dev_type == EDGE_DEV) &&
+               if ((phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE ||
+                    phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE) &&
                    phy->routing_attr == SUBTRACTIVE_ROUTING) {
 
                        if (!sub_sas_addr)
@@ -1245,8 +1245,8 @@ static void sas_print_parent_topology_bug(struct domain_device *child,
                                                 struct ex_phy *child_phy)
 {
        static const char *ex_type[] = {
-               [EDGE_DEV] = "edge",
-               [FANOUT_DEV] = "fanout",
+               [SAS_EDGE_EXPANDER_DEVICE] = "edge",
+               [SAS_FANOUT_EXPANDER_DEVICE] = "fanout",
        };
        struct domain_device *parent = child->parent;
 
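
Note: the ex_type[] table above keeps working across the rename because it uses C designated initializers; the strings stay attached to the named constants even though the new enum values leave gaps in the array. A tiny stand-alone illustration of the idiom, with hypothetical values:

#include <stdio.h>

enum example_type { TYPE_EDGE = 2, TYPE_FANOUT = 3 };	/* gaps before index 2 are fine */

static const char *example_name[] = {
	[TYPE_EDGE]   = "edge",
	[TYPE_FANOUT] = "fanout",
	/* slots not named in the initializer are implicitly NULL */
};

int main(void)
{
	printf("%s\n", example_name[TYPE_FANOUT]);
	return 0;
}
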
@@ -1321,8 +1321,8 @@ static int sas_check_parent_topology(struct domain_device *child)
        if (!child->parent)
                return 0;
 
-       if (child->parent->dev_type != EDGE_DEV &&
-           child->parent->dev_type != FANOUT_DEV)
+       if (child->parent->dev_type != SAS_EDGE_EXPANDER_DEVICE &&
+           child->parent->dev_type != SAS_FANOUT_EXPANDER_DEVICE)
                return 0;
 
        parent_ex = &child->parent->ex_dev;
@@ -1341,8 +1341,8 @@ static int sas_check_parent_topology(struct domain_device *child)
                child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id];
 
                switch (child->parent->dev_type) {
-               case EDGE_DEV:
-                       if (child->dev_type == FANOUT_DEV) {
+               case SAS_EDGE_EXPANDER_DEVICE:
+                       if (child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
                                if (parent_phy->routing_attr != SUBTRACTIVE_ROUTING ||
                                    child_phy->routing_attr != TABLE_ROUTING) {
                                        sas_print_parent_topology_bug(child, parent_phy, child_phy);
@@ -1366,7 +1366,7 @@ static int sas_check_parent_topology(struct domain_device *child)
                                }
                        }
                        break;
-               case FANOUT_DEV:
+               case SAS_FANOUT_EXPANDER_DEVICE:
                        if (parent_phy->routing_attr != TABLE_ROUTING ||
                            child_phy->routing_attr != SUBTRACTIVE_ROUTING) {
                                sas_print_parent_topology_bug(child, parent_phy, child_phy);
@@ -1619,8 +1619,8 @@ static int sas_ex_level_discovery(struct asd_sas_port *port, const int level)
        struct domain_device *dev;
 
        list_for_each_entry(dev, &port->dev_list, dev_list_node) {
-               if (dev->dev_type == EDGE_DEV ||
-                   dev->dev_type == FANOUT_DEV) {
+               if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+                   dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
                        struct sas_expander_device *ex =
                                rphy_to_expander_device(dev->rphy);
 
@@ -1720,7 +1720,7 @@ static int sas_get_phy_change_count(struct domain_device *dev,
 }
 
 static int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id,
-                                   u8 *sas_addr, enum sas_dev_type *type)
+                                   u8 *sas_addr, enum sas_device_type *type)
 {
        int res;
        struct smp_resp *disc_resp;
@@ -1849,7 +1849,7 @@ static int sas_find_bcast_dev(struct domain_device *dev,
                        SAS_DPRINTK("Expander phys DID NOT change\n");
        }
        list_for_each_entry(ch, &ex->children, siblings) {
-               if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) {
+               if (ch->dev_type == SAS_EDGE_EXPANDER_DEVICE || ch->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
                        res = sas_find_bcast_dev(ch, src_dev);
                        if (*src_dev)
                                return res;
@@ -1866,8 +1866,8 @@ static void sas_unregister_ex_tree(struct asd_sas_port *port, struct domain_devi
 
        list_for_each_entry_safe(child, n, &ex->children, siblings) {
                set_bit(SAS_DEV_GONE, &child->state);
-               if (child->dev_type == EDGE_DEV ||
-                   child->dev_type == FANOUT_DEV)
+               if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+                   child->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
                        sas_unregister_ex_tree(port, child);
                else
                        sas_unregister_dev(port, child);
@@ -1887,8 +1887,8 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent,
                        if (SAS_ADDR(child->sas_addr) ==
                            SAS_ADDR(phy->attached_sas_addr)) {
                                set_bit(SAS_DEV_GONE, &child->state);
-                               if (child->dev_type == EDGE_DEV ||
-                                   child->dev_type == FANOUT_DEV)
+                               if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+                                   child->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
                                        sas_unregister_ex_tree(parent->port, child);
                                else
                                        sas_unregister_dev(parent->port, child);
@@ -1916,8 +1916,8 @@ static int sas_discover_bfs_by_root_level(struct domain_device *root,
        int res = 0;
 
        list_for_each_entry(child, &ex_root->children, siblings) {
-               if (child->dev_type == EDGE_DEV ||
-                   child->dev_type == FANOUT_DEV) {
+               if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+                   child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
                        struct sas_expander_device *ex =
                                rphy_to_expander_device(child->rphy);
 
@@ -1970,8 +1970,8 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
        list_for_each_entry(child, &dev->ex_dev.children, siblings) {
                if (SAS_ADDR(child->sas_addr) ==
                    SAS_ADDR(ex_phy->attached_sas_addr)) {
-                       if (child->dev_type == EDGE_DEV ||
-                           child->dev_type == FANOUT_DEV)
+                       if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+                           child->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
                                res = sas_discover_bfs_by_root(child);
                        break;
                }
@@ -1979,16 +1979,16 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
        return res;
 }
 
-static bool dev_type_flutter(enum sas_dev_type new, enum sas_dev_type old)
+static bool dev_type_flutter(enum sas_device_type new, enum sas_device_type old)
 {
        if (old == new)
                return true;
 
        /* treat device directed resets as flutter, if we went
-        * SAS_END_DEV to SATA_PENDING the link needs recovery
+        * SAS_END_DEVICE to SAS_SATA_PENDING the link needs recovery
         */
-       if ((old == SATA_PENDING && new == SAS_END_DEV) ||
-           (old == SAS_END_DEV && new == SATA_PENDING))
+       if ((old == SAS_SATA_PENDING && new == SAS_END_DEVICE) ||
+           (old == SAS_END_DEVICE && new == SAS_SATA_PENDING))
                return true;
 
        return false;
@@ -1998,7 +1998,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
 {
        struct expander_device *ex = &dev->ex_dev;
        struct ex_phy *phy = &ex->ex_phy[phy_id];
-       enum sas_dev_type type = NO_DEVICE;
+       enum sas_device_type type = SAS_PHY_UNUSED;
        u8 sas_addr[8];
        int res;
 
@@ -2032,7 +2032,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
 
                sas_ex_phy_discover(dev, phy_id);
 
-               if (ata_dev && phy->attached_dev_type == SATA_PENDING)
+               if (ata_dev && phy->attached_dev_type == SAS_SATA_PENDING)
                        action = ", needs recovery";
                SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter%s\n",
                            SAS_ADDR(dev->sas_addr), phy_id, action);
index 1de67964e5a1e39ffa10151c79489078826e2536..7e7ba83f0a2139c565f692fa93d0b3f5954abcce 100644 (file)
@@ -131,16 +131,16 @@ static inline void sas_fill_in_rphy(struct domain_device *dev,
        rphy->identify.initiator_port_protocols = dev->iproto;
        rphy->identify.target_port_protocols = dev->tproto;
        switch (dev->dev_type) {
-       case SATA_DEV:
+       case SAS_SATA_DEV:
                /* FIXME: need sata device type */
-       case SAS_END_DEV:
-       case SATA_PENDING:
+       case SAS_END_DEVICE:
+       case SAS_SATA_PENDING:
                rphy->identify.device_type = SAS_END_DEVICE;
                break;
-       case EDGE_DEV:
+       case SAS_EDGE_EXPANDER_DEVICE:
                rphy->identify.device_type = SAS_EDGE_EXPANDER_DEVICE;
                break;
-       case FANOUT_DEV:
+       case SAS_FANOUT_EXPANDER_DEVICE:
                rphy->identify.device_type = SAS_FANOUT_EXPANDER_DEVICE;
                break;
        default:
index 1398b714c01836ee3789199cf3cd627ac4fa4021..d3c5297c6c89e242d845d16142f7a309ed4ef773 100644 (file)
@@ -69,7 +69,7 @@ static void sas_resume_port(struct asd_sas_phy *phy)
                        continue;
                }
 
-               if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV) {
+               if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
                        dev->ex_dev.ex_change_count = -1;
                        for (i = 0; i < dev->ex_dev.num_phys; i++) {
                                struct ex_phy *phy = &dev->ex_dev.ex_phy[i];
index 7706c99ec8bbb1e8c70b051113f789cb6f97c1dd..bcc56cac4fd8f358d53084b598c167d060a82c19 100644 (file)
@@ -46,10 +46,15 @@ struct lpfc_sli2_slim;
 #define LPFC_DEFAULT_MENLO_SG_SEG_CNT 128      /* sg element count per scsi
                cmnd for menlo needs nearly twice as for firmware
                downloads using bsg */
-#define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */
+
+#define LPFC_MIN_SG_SLI4_BUF_SZ        0x800   /* based on LPFC_DEFAULT_SG_SEG_CNT */
+#define LPFC_MAX_SG_SLI4_SEG_CNT_DIF 128 /* sg element count per scsi cmnd */
+#define LPFC_MAX_SG_SEG_CNT_DIF 512    /* sg element count per scsi cmnd  */
 #define LPFC_MAX_SG_SEG_CNT    4096    /* sg element count per scsi cmnd */
+#define LPFC_MAX_SGL_SEG_CNT   512     /* SGL element count per scsi cmnd */
+#define LPFC_MAX_BPL_SEG_CNT   4096    /* BPL element count per scsi cmnd */
+
 #define LPFC_MAX_SGE_SIZE       0x80000000 /* Maximum data allowed in a SGE */
-#define LPFC_MAX_PROT_SG_SEG_CNT 4096  /* prot sg element count per scsi cmd*/
 #define LPFC_IOCB_LIST_CNT     2250    /* list of IOCBs for fast-path usage. */
 #define LPFC_Q_RAMP_UP_INTERVAL 120     /* lun q_depth ramp up interval */
 #define LPFC_VNAME_LEN         100     /* vport symbolic name length */
@@ -66,8 +71,10 @@ struct lpfc_sli2_slim;
  * queue depths when there are driver resource error or Firmware
  * resource error.
  */
-#define QUEUE_RAMP_DOWN_INTERVAL       (1 * HZ)   /* 1 Second */
-#define QUEUE_RAMP_UP_INTERVAL         (300 * HZ) /* 5 minutes */
+/* 1 Second */
+#define QUEUE_RAMP_DOWN_INTERVAL       (msecs_to_jiffies(1000 * 1))
+/* 5 minutes */
+#define QUEUE_RAMP_UP_INTERVAL         (msecs_to_jiffies(1000 * 300))
 
 /* Number of exchanges reserved for discovery to complete */
 #define LPFC_DISC_IOCB_BUFF_COUNT 20
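
Note: this hunk and many of the lpfc hunks below replace open-coded HZ arithmetic (jiffies + HZ * seconds) with msecs_to_jiffies(), keeping the interval in milliseconds so the result is correct for any CONFIG_HZ. A hedged kernel-style sketch of the pattern; the helper name is made up for illustration:

#include <linux/jiffies.h>
#include <linux/timer.h>

/* Illustrative only: arm a timer 'seconds' from now.  msecs_to_jiffies()
 * performs the tick conversion once, independent of the kernel's HZ value. */
static void example_arm_timer(struct timer_list *t, unsigned int seconds)
{
	mod_timer(t, jiffies + msecs_to_jiffies(1000 * seconds));
}
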
@@ -671,6 +678,7 @@ struct lpfc_hba {
        uint32_t lmt;
 
        uint32_t fc_topology;   /* link topology, from LINK INIT */
+       uint32_t fc_topology_changed;   /* set when the link topology changes */
 
        struct lpfc_stats fc_stat;
 
@@ -701,9 +709,11 @@ struct lpfc_hba {
        uint32_t cfg_poll_tmo;
        uint32_t cfg_use_msi;
        uint32_t cfg_fcp_imax;
+       uint32_t cfg_fcp_cpu_map;
        uint32_t cfg_fcp_wq_count;
        uint32_t cfg_fcp_eq_count;
        uint32_t cfg_fcp_io_channel;
+       uint32_t cfg_total_seg_cnt;
        uint32_t cfg_sg_seg_cnt;
        uint32_t cfg_prot_sg_seg_cnt;
        uint32_t cfg_sg_dma_buf_size;
@@ -804,8 +814,10 @@ struct lpfc_hba {
        uint64_t bg_reftag_err_cnt;
 
        /* fastpath list. */
-       spinlock_t scsi_buf_list_lock;
-       struct list_head lpfc_scsi_buf_list;
+       spinlock_t scsi_buf_list_get_lock;  /* SCSI buf alloc list lock */
+       spinlock_t scsi_buf_list_put_lock;  /* SCSI buf free list lock */
+       struct list_head lpfc_scsi_buf_list_get;
+       struct list_head lpfc_scsi_buf_list_put;
        uint32_t total_scsi_bufs;
        struct list_head lpfc_iocb_list;
        uint32_t total_iocbq_bufs;
index 9290713af253fd740454bbee6d2b6807f64f4b93..3c5625b8b1f4e83814f878c590f27377f4aaecbf 100644 (file)
@@ -674,6 +674,9 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
        int i;
        int rc;
 
+       if (phba->pport->fc_flag & FC_OFFLINE_MODE)
+               return 0;
+
        init_completion(&online_compl);
        rc = lpfc_workq_post_event(phba, &status, &online_compl,
                              LPFC_EVT_OFFLINE_PREP);
@@ -741,7 +744,8 @@ lpfc_selective_reset(struct lpfc_hba *phba)
        int status = 0;
        int rc;
 
-       if (!phba->cfg_enable_hba_reset)
+       if ((!phba->cfg_enable_hba_reset) ||
+           (phba->pport->fc_flag & FC_OFFLINE_MODE))
                return -EACCES;
 
        status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
@@ -895,6 +899,7 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
                pci_disable_sriov(pdev);
                phba->cfg_sriov_nr_virtfn = 0;
        }
+
        status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
 
        if (status != 0)
@@ -2801,6 +2806,8 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
                lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
                        "3054 lpfc_topology changed from %d to %d\n",
                        prev_val, val);
+               if (prev_val != val && phba->sli_rev == LPFC_SLI_REV4)
+                       phba->fc_topology_changed = 1;
                err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
                if (err) {
                        phba->cfg_topology = prev_val;
@@ -3792,6 +3799,141 @@ lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
 static DEVICE_ATTR(lpfc_fcp_imax, S_IRUGO | S_IWUSR,
                   lpfc_fcp_imax_show, lpfc_fcp_imax_store);
 
+/**
+ * lpfc_fcp_cpu_map_show - Display the driver's CPU to IRQ vector mapping
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains text describing the CPU to IRQ vector mapping.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
+                     char *buf)
+{
+       struct Scsi_Host  *shost = class_to_shost(dev);
+       struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+       struct lpfc_hba   *phba = vport->phba;
+       struct lpfc_vector_map_info *cpup;
+       int  idx, len = 0;
+
+       if ((phba->sli_rev != LPFC_SLI_REV4) ||
+           (phba->intr_type != MSIX))
+               return len;
+
+       switch (phba->cfg_fcp_cpu_map) {
+       case 0:
+               len += snprintf(buf + len, PAGE_SIZE-len,
+                               "fcp_cpu_map: No mapping (%d)\n",
+                               phba->cfg_fcp_cpu_map);
+               return len;
+       case 1:
+               len += snprintf(buf + len, PAGE_SIZE-len,
+                               "fcp_cpu_map: HBA centric mapping (%d): "
+                               "%d online CPUs\n",
+                               phba->cfg_fcp_cpu_map,
+                               phba->sli4_hba.num_online_cpu);
+               break;
+       case 2:
+               len += snprintf(buf + len, PAGE_SIZE-len,
+                               "fcp_cpu_map: Driver centric mapping (%d): "
+                               "%d online CPUs\n",
+                               phba->cfg_fcp_cpu_map,
+                               phba->sli4_hba.num_online_cpu);
+               break;
+       }
+
+       cpup = phba->sli4_hba.cpu_map;
+       for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
+               if (cpup->irq == LPFC_VECTOR_MAP_EMPTY)
+                       len += snprintf(buf + len, PAGE_SIZE-len,
+                                       "CPU %02d io_chan %02d "
+                                       "physid %d coreid %d\n",
+                                       idx, cpup->channel_id, cpup->phys_id,
+                                       cpup->core_id);
+               else
+                       len += snprintf(buf + len, PAGE_SIZE-len,
+                                       "CPU %02d io_chan %02d "
+                                       "physid %d coreid %d IRQ %d\n",
+                                       idx, cpup->channel_id, cpup->phys_id,
+                                       cpup->core_id, cpup->irq);
+
+               cpup++;
+       }
+       return len;
+}
+
+/**
+ * lpfc_fcp_cpu_map_store - Change CPU affinity of driver vectors
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: requested fcp_cpu_map value (ignored; changing it at run time is not implemented).
+ * @count: not used.
+ *
+ * Returns:
+ * -EINVAL  - Not implemented yet.
+ **/
+static ssize_t
+lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr,
+                      const char *buf, size_t count)
+{
+       int status = -EINVAL;
+       return status;
+}
+
+/*
+# lpfc_fcp_cpu_map: Defines how to map CPUs to IRQ vectors
+# for the HBA.
+#
+# Value range is [0 to 2]. Default value is LPFC_DRIVER_CPU_MAP (2).
+#      0 - Do not affinitize IRQ vectors
+#      1 - Affinitize HBA vectors with respect to each HBA
+#          (start with CPU0 for each HBA)
+#      2 - Affinitize HBA vectors with respect to the entire driver
+#          (round robin through all CPUs across all HBAs)
+*/
+static int lpfc_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
+module_param(lpfc_fcp_cpu_map, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(lpfc_fcp_cpu_map,
+                "Defines how to map CPUs to IRQ vectors per HBA");
+
+/**
+ * lpfc_fcp_cpu_map_init - Set the initial CPU to IRQ vector mapping mode
+ * @phba: lpfc_hba pointer.
+ * @val: requested fcp_cpu_map mode.
+ *
+ * Description:
+ * If val is in a valid range [0-2], then affinitize the adapter's
+ * MSIX vectors.
+ *
+ * Returns:
+ * zero if val saved.
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val)
+{
+       if (phba->sli_rev != LPFC_SLI_REV4) {
+               phba->cfg_fcp_cpu_map = 0;
+               return 0;
+       }
+
+       if (val >= LPFC_MIN_CPU_MAP && val <= LPFC_MAX_CPU_MAP) {
+               phba->cfg_fcp_cpu_map = val;
+               return 0;
+       }
+
+       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                       "3326 fcp_cpu_map: %d out of range, using default\n",
+                       val);
+       phba->cfg_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
+
+       return 0;
+}
+
+static DEVICE_ATTR(lpfc_fcp_cpu_map, S_IRUGO | S_IWUSR,
+                  lpfc_fcp_cpu_map_show, lpfc_fcp_cpu_map_store);
+
 /*
 # lpfc_fcp_class:  Determines FC class to use for the FCP protocol.
 # Value range is [2,3]. Default value is 3.
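
Note: the new lpfc_fcp_cpu_map attribute above follows the common pairing of a module parameter with a sysfs device attribute: the module_param() value seeds phba->cfg_fcp_cpu_map at init time via lpfc_fcp_cpu_map_init(), and the DEVICE_ATTR() show handler formats the live value into the PAGE_SIZE buffer sysfs provides. A stripped-down sketch of that pairing; the names here are hypothetical, not part of lpfc:

#include <linux/device.h>
#include <linux/module.h>

static int example_cpu_map = 2;
module_param(example_cpu_map, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(example_cpu_map, "0 = no mapping, 1 = per-HBA, 2 = driver-wide");

/* sysfs show handlers receive a full PAGE_SIZE buffer to fill */
static ssize_t example_cpu_map_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", example_cpu_map);
}

static DEVICE_ATTR(example_cpu_map, S_IRUGO, example_cpu_map_show, NULL);
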
@@ -4009,12 +4151,11 @@ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
 #       0  = disabled (default)
 #       1  = enabled
 # Value range is [0,1]. Default value is 0.
+#
+# This feature is under investigation and may be supported in the future.
 */
 unsigned int lpfc_fcp_look_ahead = LPFC_LOOK_AHEAD_OFF;
 
-module_param(lpfc_fcp_look_ahead, uint, S_IRUGO);
-MODULE_PARM_DESC(lpfc_fcp_look_ahead, "Look ahead for completions");
-
 /*
 # lpfc_prot_mask: i
 #      - Bit mask of host protection capabilities used to register with the
@@ -4071,16 +4212,23 @@ MODULE_PARM_DESC(lpfc_delay_discovery,
 
 /*
  * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count
- * This value can be set to values between 64 and 256. The default value is
+ * This value can be set to values between 64 and 4096. The default value is
  * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer
  * will be allowed to request I/Os of sizes up to (MAX_SEG_COUNT * SEG_SIZE).
+ * Because of the additional overhead involved in setting up T10-DIF,
+ * this parameter will be limited to 128 if BlockGuard is enabled under SLI4
+ * and will be limited to 512 if BlockGuard is enabled under SLI3.
  */
 LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
            LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
 
-LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_PROT_SG_SEG_CNT,
-               LPFC_DEFAULT_PROT_SG_SEG_CNT, LPFC_MAX_PROT_SG_SEG_CNT,
-               "Max Protection Scatter Gather Segment Count");
+/*
+ * This parameter will be deprecated; the driver cannot limit the
+ * protection data s/g list.
+ */
+LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT,
+           LPFC_DEFAULT_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT,
+           "Max Protection Scatter Gather Segment Count");
 
 struct device_attribute *lpfc_hba_attrs[] = {
        &dev_attr_bg_info,
@@ -4141,6 +4289,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
        &dev_attr_lpfc_poll_tmo,
        &dev_attr_lpfc_use_msi,
        &dev_attr_lpfc_fcp_imax,
+       &dev_attr_lpfc_fcp_cpu_map,
        &dev_attr_lpfc_fcp_wq_count,
        &dev_attr_lpfc_fcp_eq_count,
        &dev_attr_lpfc_fcp_io_channel,
@@ -5123,6 +5272,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
        lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
        lpfc_use_msi_init(phba, lpfc_use_msi);
        lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
+       lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
        lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
        lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count);
        lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
index 8886668920049e685cb9b5d3572cb1426dbea526..094be2cad65bba821e9050a48bb2602dfd65f067 100644 (file)
@@ -219,26 +219,35 @@ lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
        unsigned int transfer_bytes, bytes_copied = 0;
        unsigned int sg_offset, dma_offset;
        unsigned char *dma_address, *sg_address;
-       struct scatterlist *sgel;
        LIST_HEAD(temp_list);
-
+       struct sg_mapping_iter miter;
+       unsigned long flags;
+       unsigned int sg_flags = SG_MITER_ATOMIC;
+       bool sg_valid;
 
        list_splice_init(&dma_buffers->list, &temp_list);
        list_add(&dma_buffers->list, &temp_list);
        sg_offset = 0;
-       sgel = bsg_buffers->sg_list;
+       if (to_buffers)
+               sg_flags |= SG_MITER_FROM_SG;
+       else
+               sg_flags |= SG_MITER_TO_SG;
+       sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt,
+                      sg_flags);
+       local_irq_save(flags);
+       sg_valid = sg_miter_next(&miter);
        list_for_each_entry(mp, &temp_list, list) {
                dma_offset = 0;
-               while (bytes_to_transfer && sgel &&
+               while (bytes_to_transfer && sg_valid &&
                       (dma_offset < LPFC_BPL_SIZE)) {
                        dma_address = mp->virt + dma_offset;
                        if (sg_offset) {
                                /* Continue previous partial transfer of sg */
-                               sg_address = sg_virt(sgel) + sg_offset;
-                               transfer_bytes = sgel->length - sg_offset;
+                               sg_address = miter.addr + sg_offset;
+                               transfer_bytes = miter.length - sg_offset;
                        } else {
-                               sg_address = sg_virt(sgel);
-                               transfer_bytes = sgel->length;
+                               sg_address = miter.addr;
+                               transfer_bytes = miter.length;
                        }
                        if (bytes_to_transfer < transfer_bytes)
                                transfer_bytes = bytes_to_transfer;
@@ -252,12 +261,14 @@ lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
                        sg_offset += transfer_bytes;
                        bytes_to_transfer -= transfer_bytes;
                        bytes_copied += transfer_bytes;
-                       if (sg_offset >= sgel->length) {
+                       if (sg_offset >= miter.length) {
                                sg_offset = 0;
-                               sgel = sg_next(sgel);
+                               sg_valid = sg_miter_next(&miter);
                        }
                }
        }
+       sg_miter_stop(&miter);
+       local_irq_restore(flags);
        list_del_init(&dma_buffers->list);
        list_splice(&temp_list, &dma_buffers->list);
        return bytes_copied;
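
Note: the lpfc_bsg_copy_data() rework above drops the hand-rolled sg_virt()/sg_next() walk in favour of the sg_mapping_iter API, which maps each scatterlist segment as it iterates (and so copes with pages sg_virt() cannot address directly). A hedged sketch of the basic start/next/stop loop; the function and its purpose are illustrative only:

#include <linux/irqflags.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

/* Illustrative only: zero every byte covered by a scatterlist using the
 * same iterator pattern as the hunks above. */
static void example_zero_sg(struct scatterlist *sgl, unsigned int nents)
{
	struct sg_mapping_iter miter;
	unsigned long flags;

	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_TO_SG);
	local_irq_save(flags);		/* SG_MITER_ATOMIC maps pages atomically */
	while (sg_miter_next(&miter))
		memset(miter.addr, 0, miter.length);
	sg_miter_stop(&miter);
	local_irq_restore(flags);
}
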
@@ -471,6 +482,7 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
        cmdiocbq->context1 = dd_data;
        cmdiocbq->context2 = cmp;
        cmdiocbq->context3 = bmp;
+       cmdiocbq->context_un.ndlp = ndlp;
        dd_data->type = TYPE_IOCB;
        dd_data->set_job = job;
        dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
@@ -1508,6 +1520,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
        ctiocb->context1 = dd_data;
        ctiocb->context2 = cmp;
        ctiocb->context3 = bmp;
+       ctiocb->context_un.ndlp = ndlp;
        ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
 
        dd_data->type = TYPE_IOCB;
@@ -2576,7 +2589,8 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
        evt->wait_time_stamp = jiffies;
        time_left = wait_event_interruptible_timeout(
                evt->wq, !list_empty(&evt->events_to_see),
-               ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
+               msecs_to_jiffies(1000 *
+                       ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
        if (list_empty(&evt->events_to_see))
                ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
        else {
@@ -3151,7 +3165,8 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
        evt->waiting = 1;
        time_left = wait_event_interruptible_timeout(
                evt->wq, !list_empty(&evt->events_to_see),
-               ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
+               msecs_to_jiffies(1000 *
+                       ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
        evt->waiting = 0;
        if (list_empty(&evt->events_to_see)) {
                rc = (time_left) ? -EINTR : -ETIMEDOUT;
index 7631893ae00511c49033f3445e45f8b164d47d46..d41456e5f8149e3fc09d82086554d20ef9921ce2 100644 (file)
@@ -470,3 +470,4 @@ int lpfc_sli4_xri_sgl_update(struct lpfc_hba *);
 void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *);
 uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *);
 int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t);
+void lpfc_sli4_offline_eratt(struct lpfc_hba *);
index 7bff3a19af56880dba0fdca4e203676d52c4e916..ae1a07c57caef681d5d54222e39b41b67e70643f 100644 (file)
@@ -1811,7 +1811,8 @@ lpfc_fdmi_timeout_handler(struct lpfc_vport *vport)
                if (init_utsname()->nodename[0] != '\0')
                        lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
                else
-                       mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
+                       mod_timer(&vport->fc_fdmitmo, jiffies +
+                                 msecs_to_jiffies(1000 * 60));
        }
        return;
 }
index bbed8471bf0b81fce50390bb9b0ec6b7a68ba146..3cae0a92e8bd0001142c7a175102de1e0ab01dc6 100644 (file)
@@ -29,6 +29,7 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
 
+
 #include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
@@ -238,7 +239,10 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
 
                icmd->un.elsreq64.remoteID = did;               /* DID */
                icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
-               icmd->ulpTimeout = phba->fc_ratov * 2;
+               if (elscmd == ELS_CMD_FLOGI)
+                       icmd->ulpTimeout = FF_DEF_RATOV * 2;
+               else
+                       icmd->ulpTimeout = phba->fc_ratov * 2;
        } else {
                icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
                icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
@@ -308,16 +312,20 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
                /* Xmit ELS command <elsCmd> to remote NPORT <did> */
                lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
                                 "0116 Xmit ELS command x%x to remote "
-                                "NPORT x%x I/O tag: x%x, port state: x%x\n",
+                                "NPORT x%x I/O tag: x%x, port state:x%x"
+                                " fc_flag:x%x\n",
                                 elscmd, did, elsiocb->iotag,
-                                vport->port_state);
+                                vport->port_state,
+                                vport->fc_flag);
        } else {
                /* Xmit ELS response <elsCmd> to remote NPORT <did> */
                lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
                                 "0117 Xmit ELS response x%x to remote "
-                                "NPORT x%x I/O tag: x%x, size: x%x\n",
+                                "NPORT x%x I/O tag: x%x, size: x%x "
+                                "port_state x%x fc_flag x%x\n",
                                 elscmd, ndlp->nlp_DID, elsiocb->iotag,
-                                cmdSize);
+                                cmdSize, vport->port_state,
+                                vport->fc_flag);
        }
        return elsiocb;
 
@@ -909,6 +917,23 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        spin_lock_irq(shost->host_lock);
        vport->fc_flag |= FC_PT2PT;
        spin_unlock_irq(shost->host_lock);
+       /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
+       if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
+               lpfc_unregister_fcf_prep(phba);
+
+               /* The FC_VFI_REGISTERED flag will get clear in the cmpl
+                * handler for unreg_vfi, but if we don't force the
+                * FC_VFI_REGISTERED flag then the reg_vfi mailbox could be
+                * built with the update bit set instead of just the vp bit to
+                * change the Nport ID.  We need to have the vp set and the
+                * Upd cleared on topology changes.
+                */
+               spin_lock_irq(shost->host_lock);
+               vport->fc_flag &= ~FC_VFI_REGISTERED;
+               spin_unlock_irq(shost->host_lock);
+               phba->fc_topology_changed = 0;
+               lpfc_issue_reg_vfi(vport);
+       }
 
        /* Start discovery - this should just do CLEAR_LA */
        lpfc_disc_start(vport);
@@ -1030,9 +1055,19 @@ stop_rr_fcf_flogi:
                        vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
                if ((phba->sli_rev == LPFC_SLI_REV4) &&
                    (!(vport->fc_flag & FC_VFI_REGISTERED) ||
-                    (vport->fc_prevDID != vport->fc_myDID))) {
-                       if (vport->fc_flag & FC_VFI_REGISTERED)
-                               lpfc_sli4_unreg_all_rpis(vport);
+                    (vport->fc_prevDID != vport->fc_myDID) ||
+                       phba->fc_topology_changed)) {
+                       if (vport->fc_flag & FC_VFI_REGISTERED) {
+                               if (phba->fc_topology_changed) {
+                                       lpfc_unregister_fcf_prep(phba);
+                                       spin_lock_irq(shost->host_lock);
+                                       vport->fc_flag &= ~FC_VFI_REGISTERED;
+                                       spin_unlock_irq(shost->host_lock);
+                                       phba->fc_topology_changed = 0;
+                               } else {
+                                       lpfc_sli4_unreg_all_rpis(vport);
+                               }
+                       }
                        lpfc_issue_reg_vfi(vport);
                        lpfc_nlp_put(ndlp);
                        goto out;
@@ -1054,10 +1089,11 @@ stop_rr_fcf_flogi:
 
        /* FLOGI completes successfully */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
-                        "0101 FLOGI completes successfully "
-                        "Data: x%x x%x x%x x%x\n",
+                        "0101 FLOGI completes successfully, I/O tag:x%x, "
+                        "Data: x%x x%x x%x x%x x%x x%x\n", cmdiocb->iotag,
                         irsp->un.ulpWord[4], sp->cmn.e_d_tov,
-                        sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);
+                        sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
+                        vport->port_state, vport->fc_flag);
 
        if (vport->port_state == LPFC_FLOGI) {
                /*
@@ -5047,6 +5083,8 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
        struct ls_rjt stat;
        uint32_t cmd, did;
        int rc;
+       uint32_t fc_flag = 0;
+       uint32_t port_state = 0;
 
        cmd = *lp++;
        sp = (struct serv_parm *) lp;
@@ -5113,16 +5151,25 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
                         * will be.
                         */
                        vport->fc_myDID = PT2PT_LocalID;
-               }
+               } else
+                       vport->fc_myDID = PT2PT_RemoteID;
 
                /*
                 * The vport state should go to LPFC_FLOGI only
                 * AFTER we issue a FLOGI, not receive one.
                 */
                spin_lock_irq(shost->host_lock);
+               fc_flag = vport->fc_flag;
+               port_state = vport->port_state;
                vport->fc_flag |= FC_PT2PT;
                vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+               vport->port_state = LPFC_FLOGI;
                spin_unlock_irq(shost->host_lock);
+               lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+                                "3311 Rcv Flogi PS x%x new PS x%x "
+                                "fc_flag x%x new fc_flag x%x\n",
+                                port_state, vport->port_state,
+                                fc_flag, vport->fc_flag);
 
                /*
                 * We temporarily set fc_myDID to make it look like we are
@@ -6241,7 +6288,8 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
        }
 
        if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq))
-               mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
+               mod_timer(&vport->els_tmofunc,
+                         jiffies + msecs_to_jiffies(1000 * timeout));
 }
 
 /**
@@ -6612,7 +6660,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
        /* ELS command <elsCmd> received from NPORT <did> */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
                         "0112 ELS command x%x received from NPORT x%x "
-                        "Data: x%x\n", cmd, did, vport->port_state);
+                        "Data: x%x x%x x%x x%x\n",
+                       cmd, did, vport->port_state, vport->fc_flag,
+                       vport->fc_myDID, vport->fc_prevDID);
        switch (cmd) {
        case ELS_CMD_PLOGI:
                lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -6621,6 +6671,19 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 
                phba->fc_stat.elsRcvPLOGI++;
                ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
+               if (phba->sli_rev == LPFC_SLI_REV4 &&
+                   (phba->pport->fc_flag & FC_PT2PT)) {
+                       vport->fc_prevDID = vport->fc_myDID;
+                       /* Our DID needs to be updated before registering
+                        * the vfi. This is done in lpfc_rcv_plogi but
+                        * that is called after the reg_vfi.
+                        */
+                       vport->fc_myDID = elsiocb->iocb.un.rcvels.parmRo;
+                       lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+                                        "3312 Remote port assigned DID x%x "
+                                        "%x\n", vport->fc_myDID,
+                                        vport->fc_prevDID);
+               }
 
                lpfc_send_els_event(vport, ndlp, payload);
 
@@ -6630,6 +6693,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                        rjt_exp = LSEXP_NOTHING_MORE;
                        break;
                }
+               shost = lpfc_shost_from_vport(vport);
                if (vport->port_state < LPFC_DISC_AUTH) {
                        if (!(phba->pport->fc_flag & FC_PT2PT) ||
                                (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
@@ -6641,9 +6705,18 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                         * another NPort and the other side has initiated
                         * the PLOGI before responding to our FLOGI.
                         */
+                       if (phba->sli_rev == LPFC_SLI_REV4 &&
+                           (phba->fc_topology_changed ||
+                            vport->fc_myDID != vport->fc_prevDID)) {
+                               lpfc_unregister_fcf_prep(phba);
+                               spin_lock_irq(shost->host_lock);
+                               vport->fc_flag &= ~FC_VFI_REGISTERED;
+                               spin_unlock_irq(shost->host_lock);
+                               phba->fc_topology_changed = 0;
+                               lpfc_issue_reg_vfi(vport);
+                       }
                }
 
-               shost = lpfc_shost_from_vport(vport);
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
                spin_unlock_irq(shost->host_lock);
@@ -7002,8 +7075,11 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
        spin_lock_irq(shost->host_lock);
        if (vport->fc_flag & FC_DISC_DELAYED) {
                spin_unlock_irq(shost->host_lock);
+               lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+                               "3334 Delay fc port discovery for %d seconds\n",
+                               phba->fc_ratov);
                mod_timer(&vport->delayed_disc_tmo,
-                       jiffies + HZ * phba->fc_ratov);
+                       jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));
                return;
        }
        spin_unlock_irq(shost->host_lock);
@@ -7287,7 +7363,7 @@ lpfc_retry_pport_discovery(struct lpfc_hba *phba)
                return;
 
        shost = lpfc_shost_from_vport(phba->pport);
-       mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+       mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
        spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag |= NLP_DELAY_TMO;
        spin_unlock_irq(shost->host_lock);
@@ -7791,7 +7867,8 @@ lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
        blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
        /* Start a timer to unblock fabric iocbs after 100ms */
        if (!blocked)
-               mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 );
+               mod_timer(&phba->fabric_block_timer,
+                         jiffies + msecs_to_jiffies(100));
 
        return;
 }
index 326e05a65a7314d7648a16f0097485bb5d439a0b..0f6e2548f35d7b7b03d870b813b5c1726bfcc48d 100644 (file)
@@ -160,11 +160,12 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
        if (!list_empty(&evtp->evt_listp))
                return;
 
+       evtp->evt_arg1  = lpfc_nlp_get(ndlp);
+
        spin_lock_irq(&phba->hbalock);
        /* We need to hold the node by incrementing the reference
         * count until this queued work is done
         */
-       evtp->evt_arg1  = lpfc_nlp_get(ndlp);
        if (evtp->evt_arg1) {
                evtp->evt = LPFC_EVT_DEV_LOSS;
                list_add_tail(&evtp->evt_listp, &phba->work_list);
@@ -1008,9 +1009,6 @@ lpfc_linkup(struct lpfc_hba *phba)
                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
                        lpfc_linkup_port(vports[i]);
        lpfc_destroy_vport_work_array(phba, vports);
-       if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
-           (phba->sli_rev < LPFC_SLI_REV4))
-               lpfc_issue_clear_la(phba, phba->pport);
 
        return 0;
 }
@@ -1436,7 +1434,8 @@ lpfc_register_fcf(struct lpfc_hba *phba)
        if (phba->fcf.fcf_flag & FCF_REGISTERED) {
                phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
                phba->hba_flag &= ~FCF_TS_INPROG;
-               if (phba->pport->port_state != LPFC_FLOGI) {
+               if (phba->pport->port_state != LPFC_FLOGI &&
+                   phba->pport->fc_flag & FC_FABRIC) {
                        phba->hba_flag |= FCF_RR_INPROG;
                        spin_unlock_irq(&phba->hbalock);
                        lpfc_initial_flogi(phba->pport);
@@ -2270,8 +2269,11 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
                                spin_unlock_irq(&phba->hbalock);
                                lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
                                                "2836 New FCF matches in-use "
-                                               "FCF (x%x)\n",
-                                               phba->fcf.current_rec.fcf_indx);
+                                               "FCF (x%x), port_state:x%x, "
+                                               "fc_flag:x%x\n",
+                                               phba->fcf.current_rec.fcf_indx,
+                                               phba->pport->port_state,
+                                               phba->pport->fc_flag);
                                goto out;
                        } else
                                lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
@@ -2796,7 +2798,19 @@ void
 lpfc_issue_init_vpi(struct lpfc_vport *vport)
 {
        LPFC_MBOXQ_t *mboxq;
-       int rc;
+       int rc, vpi;
+
+       if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) {
+               vpi = lpfc_alloc_vpi(vport->phba);
+               if (!vpi) {
+                       lpfc_printf_vlog(vport, KERN_ERR,
+                                        LOG_MBOX,
+                                        "3303 Failed to obtain vport vpi\n");
+                       lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+                       return;
+               }
+               vport->vpi = vpi;
+       }
 
        mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
        if (!mboxq) {
@@ -2894,9 +2908,14 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
                goto out_free_mem;
        }
 
-       /* If the VFI is already registered, there is nothing else to do */
+       /* If the VFI is already registered, there is nothing else to do
+        * Unless this was a VFI update and we are in PT2PT mode, then
+        * we should drop through to set the port state to ready.
+        */
        if (vport->fc_flag & FC_VFI_REGISTERED)
-               goto out_free_mem;
+               if (!(phba->sli_rev == LPFC_SLI_REV4 &&
+                     vport->fc_flag & FC_PT2PT))
+                       goto out_free_mem;
 
        /* The VPI is implicitly registered when the VFI is registered */
        spin_lock_irq(shost->host_lock);
@@ -2913,6 +2932,13 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
                goto out_free_mem;
        }
 
+       lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+                        "3313 cmpl reg vfi  port_state:%x fc_flag:%x myDid:%x "
+                        "alpacnt:%d LinkState:%x topology:%x\n",
+                        vport->port_state, vport->fc_flag, vport->fc_myDID,
+                        vport->phba->alpa_map[0],
+                        phba->link_state, phba->fc_topology);
+
        if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
                /*
                 * For private loop or for NPort pt2pt,
@@ -2925,7 +2951,10 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
                        /* Use loop map to make discovery list */
                        lpfc_disc_list_loopmap(vport);
                        /* Start discovery */
-                       lpfc_disc_start(vport);
+                       if (vport->fc_flag & FC_PT2PT)
+                               vport->port_state = LPFC_VPORT_READY;
+                       else
+                               lpfc_disc_start(vport);
                } else {
                        lpfc_start_fdiscs(phba);
                        lpfc_do_scr_ns_plogi(phba, vport);
@@ -3007,6 +3036,15 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
                break;
        }
 
+       if (phba->fc_topology &&
+           phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+                               "3314 Topology changed was 0x%x is 0x%x\n",
+                               phba->fc_topology,
+                               bf_get(lpfc_mbx_read_top_topology, la));
+               phba->fc_topology_changed = 1;
+       }
+
        phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
        phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
 
@@ -4235,7 +4273,7 @@ lpfc_set_disctmo(struct lpfc_vport *vport)
                        tmo, vport->port_state, vport->fc_flag);
        }
 
-       mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
+       mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
        spin_lock_irq(shost->host_lock);
        vport->fc_flag |= FC_DISC_TMO;
        spin_unlock_irq(shost->host_lock);
@@ -4949,8 +4987,12 @@ lpfc_disc_start(struct lpfc_vport *vport)
        uint32_t clear_la_pending;
        int did_changed;
 
-       if (!lpfc_is_link_up(phba))
+       if (!lpfc_is_link_up(phba)) {
+               lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+                                "3315 Link is not up %x\n",
+                                phba->link_state);
                return;
+       }
 
        if (phba->link_state == LPFC_CLEAR_LA)
                clear_la_pending = 1;
@@ -4983,11 +5025,13 @@ lpfc_disc_start(struct lpfc_vport *vport)
        if (num_sent)
                return;
 
-       /* Register the VPI for SLI3, NON-NPIV only. */
+       /* Register the VPI for SLI3, NPIV only. */
        if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
            !(vport->fc_flag & FC_PT2PT) &&
            !(vport->fc_flag & FC_RSCN_MODE) &&
            (phba->sli_rev < LPFC_SLI_REV4)) {
+               if (vport->port_type == LPFC_PHYSICAL_PORT)
+                       lpfc_issue_clear_la(phba, vport);
                lpfc_issue_reg_vpi(phba, vport);
                return;
        }
@@ -5410,7 +5454,8 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        if (vport->cfg_fdmi_on == 1)
                lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
        else
-               mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
+               mod_timer(&vport->fc_fdmitmo,
+                         jiffies + msecs_to_jiffies(1000 * 60));
 
        /* decrement the node reference count held for this callback
         * function.
@@ -5855,7 +5900,7 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
        struct lpfc_vport **vports;
        struct lpfc_nodelist *ndlp;
        struct Scsi_Host *shost;
-       int i, rc;
+       int i = 0, rc;
 
        /* Unregister RPIs */
        if (lpfc_fcf_inuse(phba))
@@ -5883,6 +5928,20 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
                        spin_unlock_irq(shost->host_lock);
                }
        lpfc_destroy_vport_work_array(phba, vports);
+       if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) {
+               ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
+               if (ndlp)
+                       lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
+               lpfc_cleanup_pending_mbox(phba->pport);
+               if (phba->sli_rev == LPFC_SLI_REV4)
+                       lpfc_sli4_unreg_all_rpis(phba->pport);
+               lpfc_mbx_unreg_vpi(phba->pport);
+               shost = lpfc_shost_from_vport(phba->pport);
+               spin_lock_irq(shost->host_lock);
+               phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+               phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED;
+               spin_unlock_irq(shost->host_lock);
+       }
 
        /* Cleanup any outstanding ELS commands */
        lpfc_els_flush_all_cmd(phba);
index e8c47603170370ca0d2418bb3e4d759b377d60e3..83700c18f4688a872894aec6da35d7c46058580a 100644 (file)
@@ -1667,6 +1667,7 @@ enum lpfc_protgrp_type {
 #define        BG_OP_IN_CSUM_OUT_CSUM          0x5
 #define        BG_OP_IN_CRC_OUT_CSUM           0x6
 #define        BG_OP_IN_CSUM_OUT_CRC           0x7
+#define        BG_OP_RAW_MODE                  0x8
 
 struct lpfc_pde5 {
        uint32_t word0;
index 1dd2f6f0a1272f4717e6063449879b55b06ce4b0..713a4613ec3ac1c22829fa9d150d6c9767ae3d80 100644 (file)
@@ -200,6 +200,11 @@ struct lpfc_sli_intf {
 #define LPFC_MAX_IMAX          5000000
 #define LPFC_DEF_IMAX          50000
 
+#define LPFC_MIN_CPU_MAP       0
+#define LPFC_MAX_CPU_MAP       2
+#define LPFC_HBA_CPU_MAP       1
+#define LPFC_DRIVER_CPU_MAP    2  /* Default */
+
 /* PORT_CAPABILITIES constants. */
 #define LPFC_MAX_SUPPORTED_PAGES       8
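
The four LPFC_*_CPU_MAP values added above bound the new cfg_fcp_cpu_map setting consumed by lpfc_sli4_set_affinity() later in this merge: 0 disables vector-to-CPU mapping, and the 1/2 values appear to select HBA-centric versus driver-centric placement, with driver-centric as the default. That reading is inferred from the names; the sketch below only shows how a value in that range would typically be validated and is not the driver's actual attribute code:

        /* Hypothetical range check for a cpu_map setting (sketch only). */
        static int lpfc_cpu_map_validate_sketch(int val)
        {
                if (val < LPFC_MIN_CPU_MAP || val > LPFC_MAX_CPU_MAP)
                        return LPFC_DRIVER_CPU_MAP;     /* default */
                return val;
        }
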
 
@@ -621,7 +626,7 @@ struct lpfc_register {
 #define lpfc_sliport_status_rdy_SHIFT  23
 #define lpfc_sliport_status_rdy_MASK   0x1
 #define lpfc_sliport_status_rdy_WORD   word0
-#define MAX_IF_TYPE_2_RESETS   1000
+#define MAX_IF_TYPE_2_RESETS           6
 
 #define LPFC_CTL_PORT_CTL_OFFSET       0x408
 #define lpfc_sliport_ctrl_end_SHIFT    30
index 90b8b0515e23d7c0ee0f309d640c5fb6e94e8988..cb465b253910b9972eb48639d4527162a7a0b0b9 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <linux/firmware.h>
 #include <linux/miscdevice.h>
+#include <linux/percpu.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
@@ -58,6 +59,9 @@ char *_dump_buf_dif;
 unsigned long _dump_buf_dif_order;
 spinlock_t _dump_buf_lock;
 
+/* Used when mapping IRQ vectors in a driver centric manner */
+uint16_t lpfc_used_cpu[LPFC_MAX_CPU];
+
 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
 static int lpfc_post_rcv_buf(struct lpfc_hba *);
 static int lpfc_sli4_queue_verify(struct lpfc_hba *);
@@ -541,13 +545,16 @@ lpfc_config_port_post(struct lpfc_hba *phba)
 
        /* Set up ring-0 (ELS) timer */
        timeout = phba->fc_ratov * 2;
-       mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
+       mod_timer(&vport->els_tmofunc,
+                 jiffies + msecs_to_jiffies(1000 * timeout));
        /* Set up heart beat (HB) timer */
-       mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+       mod_timer(&phba->hb_tmofunc,
+                 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
        phba->hb_outstanding = 0;
        phba->last_completion_time = jiffies;
        /* Set up error attention (ERATT) polling timer */
-       mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
+       mod_timer(&phba->eratt_poll,
+                 jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
 
        if (phba->hba_flag & LINK_DISABLED) {
                lpfc_printf_log(phba,
@@ -908,9 +915,9 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
                psb->pCmd = NULL;
                psb->status = IOSTAT_SUCCESS;
        }
-       spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
-       list_splice(&aborts, &phba->lpfc_scsi_buf_list);
-       spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
+       spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
+       list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
+       spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
        return 0;
 }
 
@@ -1021,7 +1028,8 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
                !(phba->link_state == LPFC_HBA_ERROR) &&
                !(phba->pport->load_flag & FC_UNLOADING))
                mod_timer(&phba->hb_tmofunc,
-                       jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+                         jiffies +
+                         msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
        return;
 }
 
@@ -1064,15 +1072,18 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
 
        spin_lock_irq(&phba->pport->work_port_lock);
 
-       if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
-               jiffies)) {
+       if (time_after(phba->last_completion_time +
+                       msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
+                       jiffies)) {
                spin_unlock_irq(&phba->pport->work_port_lock);
                if (!phba->hb_outstanding)
                        mod_timer(&phba->hb_tmofunc,
-                               jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+                               jiffies +
+                               msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
                else
                        mod_timer(&phba->hb_tmofunc,
-                               jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
+                               jiffies +
+                               msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
                return;
        }
        spin_unlock_irq(&phba->pport->work_port_lock);
@@ -1104,7 +1115,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
                                if (!pmboxq) {
                                        mod_timer(&phba->hb_tmofunc,
                                                 jiffies +
-                                                HZ * LPFC_HB_MBOX_INTERVAL);
+                                                msecs_to_jiffies(1000 *
+                                                LPFC_HB_MBOX_INTERVAL));
                                        return;
                                }
 
@@ -1120,7 +1132,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
                                                        phba->mbox_mem_pool);
                                        mod_timer(&phba->hb_tmofunc,
                                                jiffies +
-                                               HZ * LPFC_HB_MBOX_INTERVAL);
+                                               msecs_to_jiffies(1000 *
+                                               LPFC_HB_MBOX_INTERVAL));
                                        return;
                                }
                                phba->skipped_hb = 0;
@@ -1136,7 +1149,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
                                phba->skipped_hb = jiffies;
 
                        mod_timer(&phba->hb_tmofunc,
-                                 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
+                                jiffies +
+                                msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
                        return;
                } else {
                        /*
@@ -1150,7 +1164,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
                                        jiffies_to_msecs(jiffies
                                                 - phba->last_completion_time));
                        mod_timer(&phba->hb_tmofunc,
-                                 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
+                               jiffies +
+                               msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
                }
        }
 }
@@ -1191,7 +1206,7 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
  * This routine is called to bring a SLI4 HBA offline when HBA hardware error
  * other than Port Error 6 has been detected.
  **/
-static void
+void
 lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
 {
        lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
@@ -2633,6 +2648,7 @@ lpfc_online(struct lpfc_hba *phba)
        struct lpfc_vport *vport;
        struct lpfc_vport **vports;
        int i;
+       bool vpis_cleared = false;
 
        if (!phba)
                return 0;
@@ -2656,6 +2672,10 @@ lpfc_online(struct lpfc_hba *phba)
                        lpfc_unblock_mgmt_io(phba);
                        return 1;
                }
+               spin_lock_irq(&phba->hbalock);
+               if (!phba->sli4_hba.max_cfg_param.vpi_used)
+                       vpis_cleared = true;
+               spin_unlock_irq(&phba->hbalock);
        } else {
                if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
                        lpfc_unblock_mgmt_io(phba);
@@ -2672,8 +2692,13 @@ lpfc_online(struct lpfc_hba *phba)
                        vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
                        if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
                                vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
-                       if (phba->sli_rev == LPFC_SLI_REV4)
+                       if (phba->sli_rev == LPFC_SLI_REV4) {
                                vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+                               if ((vpis_cleared) &&
+                                   (vports[i]->port_type !=
+                                       LPFC_PHYSICAL_PORT))
+                                       vports[i]->vpi = 0;
+                       }
                        spin_unlock_irq(shost->host_lock);
                }
                lpfc_destroy_vport_work_array(phba, vports);
@@ -2833,16 +2858,30 @@ lpfc_scsi_free(struct lpfc_hba *phba)
        struct lpfc_iocbq *io, *io_next;
 
        spin_lock_irq(&phba->hbalock);
+
        /* Release all the lpfc_scsi_bufs maintained by this host. */
-       spin_lock(&phba->scsi_buf_list_lock);
-       list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
+
+       spin_lock(&phba->scsi_buf_list_put_lock);
+       list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
+                                list) {
                list_del(&sb->list);
                pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
                              sb->dma_handle);
                kfree(sb);
                phba->total_scsi_bufs--;
        }
-       spin_unlock(&phba->scsi_buf_list_lock);
+       spin_unlock(&phba->scsi_buf_list_put_lock);
+
+       spin_lock(&phba->scsi_buf_list_get_lock);
+       list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
+                                list) {
+               list_del(&sb->list);
+               pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
+                             sb->dma_handle);
+               kfree(sb);
+               phba->total_scsi_bufs--;
+       }
+       spin_unlock(&phba->scsi_buf_list_get_lock);
 
        /* Release all the lpfc_iocbq entries maintained by this host. */
        list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
@@ -2978,9 +3017,12 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
                        phba->sli4_hba.scsi_xri_cnt,
                        phba->sli4_hba.scsi_xri_max);
 
-       spin_lock_irq(&phba->scsi_buf_list_lock);
-       list_splice_init(&phba->lpfc_scsi_buf_list, &scsi_sgl_list);
-       spin_unlock_irq(&phba->scsi_buf_list_lock);
+       spin_lock_irq(&phba->scsi_buf_list_get_lock);
+       spin_lock_irq(&phba->scsi_buf_list_put_lock);
+       list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
+       list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
+       spin_unlock_irq(&phba->scsi_buf_list_put_lock);
+       spin_unlock_irq(&phba->scsi_buf_list_get_lock);
 
        if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
                /* max scsi xri shrinked below the allocated scsi buffers */
@@ -2994,9 +3036,9 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
                                      psb->dma_handle);
                        kfree(psb);
                }
-               spin_lock_irq(&phba->scsi_buf_list_lock);
+               spin_lock_irq(&phba->scsi_buf_list_get_lock);
                phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
-               spin_unlock_irq(&phba->scsi_buf_list_lock);
+               spin_unlock_irq(&phba->scsi_buf_list_get_lock);
        }
 
        /* update xris associated to remaining allocated scsi buffers */
@@ -3014,9 +3056,12 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
                psb->cur_iocbq.sli4_lxritag = lxri;
                psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
        }
-       spin_lock_irq(&phba->scsi_buf_list_lock);
-       list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list);
-       spin_unlock_irq(&phba->scsi_buf_list_lock);
+       spin_lock_irq(&phba->scsi_buf_list_get_lock);
+       spin_lock_irq(&phba->scsi_buf_list_put_lock);
+       list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
+       INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
+       spin_unlock_irq(&phba->scsi_buf_list_put_lock);
+       spin_unlock_irq(&phba->scsi_buf_list_get_lock);
 
        return 0;
 
@@ -3197,14 +3242,15 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
                stat = 1;
                goto finished;
        }
-       if (time >= 30 * HZ) {
+       if (time >= msecs_to_jiffies(30 * 1000)) {
                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                                "0461 Scanning longer than 30 "
                                "seconds.  Continuing initialization\n");
                stat = 1;
                goto finished;
        }
-       if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
+       if (time >= msecs_to_jiffies(15 * 1000) &&
+           phba->link_state <= LPFC_LINK_DOWN) {
                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                                "0465 Link down longer than 15 "
                                "seconds.  Continuing initialization\n");
@@ -3216,7 +3262,7 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
                goto finished;
        if (vport->num_disc_nodes || vport->fc_prli_sent)
                goto finished;
-       if (vport->fc_map_cnt == 0 && time < 2 * HZ)
+       if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
                goto finished;
        if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
                goto finished;
@@ -4215,7 +4261,8 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
                         * If there are other active VLinks present,
                         * re-instantiate the Vlink using FDISC.
                         */
-                       mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+                       mod_timer(&ndlp->nlp_delayfunc,
+                                 jiffies + msecs_to_jiffies(1000));
                        shost = lpfc_shost_from_vport(vport);
                        spin_lock_irq(shost->host_lock);
                        ndlp->nlp_flag |= NLP_DELAY_TMO;
@@ -4707,23 +4754,52 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
                return -ENOMEM;
 
        /*
-        * Since the sg_tablesize is module parameter, the sg_dma_buf_size
+        * Since lpfc_sg_seg_cnt is a module parameter, the sg_dma_buf_size
         * used to create the sg_dma_buf_pool must be dynamically calculated.
-        * 2 segments are added since the IOCB needs a command and response bde.
         */
-       phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
-               sizeof(struct fcp_rsp) +
-                       ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
 
+       /* Initialize the host templates with the configured values. */
+       lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+       lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+
+       /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
        if (phba->cfg_enable_bg) {
-               phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
-               phba->cfg_sg_dma_buf_size +=
-                       phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
+               /*
+                * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
+                * the FCP rsp, and a BDE for each. Since we have no control
+                * over how many protection data segments the SCSI Layer
+                * will hand us (ie: there could be one for every block
+                * in the IO), we just allocate enough BDEs to accommodate
+                * our max amount and we need to limit lpfc_sg_seg_cnt to
+                * minimize the risk of running out.
+                */
+               phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+                       sizeof(struct fcp_rsp) +
+                       (LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64));
+
+               if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
+                       phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
+
+               /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
+               phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
+       } else {
+               /*
+                * The scsi_buf for a regular I/O will hold the FCP cmnd,
+                * the FCP rsp, a BDE for each, and a BDE for up to
+                * cfg_sg_seg_cnt data segments.
+                */
+               phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+                       sizeof(struct fcp_rsp) +
+                       ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
+
+               /* Total BDEs in BPL for scsi_sg_list */
+               phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
        }
 
-       /* Also reinitialize the host templates with new values. */
-       lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
-       lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+       lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
+                       "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
+                       phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
+                       phba->cfg_total_seg_cnt);
 
        phba->max_vpi = LPFC_MAX_VPI;
        /* This will be set to correct value after config_port mbox */
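
The rework above sizes the per-command DMA buffer differently for BlockGuard (T10-DIF) and regular I/O: with DIF enabled it reserves BDEs for the worst case (LPFC_MAX_SG_SEG_CNT), because the midlayer may hand down one protection segment per block, while the regular path only needs one BDE per data segment plus the two reserved for the FCP command and response. A sketch of the regular-path arithmetic, assuming the lpfc structure definitions are in scope (illustrative, not driver code):

        /* DMA area for a non-BlockGuard SLI-3 command with 'seg_cnt'
         * data segments plus the 2 reserved BDEs (FCP cmnd + FCP rsp).
         */
        static inline size_t lpfc_sli3_buf_size_sketch(unsigned int seg_cnt)
        {
                return sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
                       (seg_cnt + 2) * sizeof(struct ulp_bde64);
        }
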
@@ -4789,13 +4865,13 @@ lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
 static int
 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 {
+       struct lpfc_vector_map_info *cpup;
        struct lpfc_sli *psli;
        LPFC_MBOXQ_t *mboxq;
-       int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
+       int rc, i, hbq_count, max_buf_size;
        uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
        struct lpfc_mqe *mqe;
-       int longs, sli_family;
-       int sges_per_segment;
+       int longs;
 
        /* Before proceed, wait for POST done and device ready */
        rc = lpfc_sli4_post_status_check(phba);
@@ -4863,11 +4939,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
        phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
        phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
 
-       /* With BlockGuard we can have multiple SGEs per Data Segemnt */
-       sges_per_segment = 1;
-       if (phba->cfg_enable_bg)
-               sges_per_segment = 2;
-
        /*
         * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
         * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple.
@@ -4878,43 +4949,71 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                        sizeof(struct lpfc_sli_ring), GFP_KERNEL);
        if (!phba->sli.ring)
                return -ENOMEM;
+
        /*
-        * Since the sg_tablesize is module parameter, the sg_dma_buf_size
+        * It doesn't matter what family our adapter is in, we are
+        * limited to 2 Pages, 512 SGEs, for our SGL.
+        * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
+        */
+       max_buf_size = (2 * SLI4_PAGE_SIZE);
+       if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
+               phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;
+
+       /*
+        * Since lpfc_sg_seg_cnt is a module parameter, the sg_dma_buf_size
         * used to create the sg_dma_buf_pool must be dynamically calculated.
-        * 2 segments are added since the IOCB needs a command and response bde.
-        * To insure that the scsi sgl does not cross a 4k page boundary only
-        * sgl sizes of must be a power of 2.
         */
-       buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
-                   (((phba->cfg_sg_seg_cnt * sges_per_segment) + 2) *
-                   sizeof(struct sli4_sge)));
-
-       sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
-       max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
-       switch (sli_family) {
-       case LPFC_SLI_INTF_FAMILY_BE2:
-       case LPFC_SLI_INTF_FAMILY_BE3:
-               /* There is a single hint for BE - 2 pages per BPL. */
-               if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) ==
-                   LPFC_SLI_INTF_SLI_HINT1_1)
-                       max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
-               break;
-       case LPFC_SLI_INTF_FAMILY_LNCR_A0:
-       case LPFC_SLI_INTF_FAMILY_LNCR_B0:
-       default:
-               break;
+
+       if (phba->cfg_enable_bg) {
+               /*
+                * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
+                * the FCP rsp, and an SGE for each. Since we have no control
+                * over how many protection data segments the SCSI Layer
+                * will hand us (ie: there could be one for every block
+                * in the IO), we just allocate enough SGEs to accommodate
+                * our max amount and we need to limit lpfc_sg_seg_cnt to
+                * minimize the risk of running out.
+                */
+               phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+                       sizeof(struct fcp_rsp) + max_buf_size;
+
+               /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
+               phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
+
+               if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF)
+                       phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
+       } else {
+               /*
+                * The scsi_buf for a regular I/O will hold the FCP cmnd,
+                * the FCP rsp, a SGE for each, and a SGE for up to
+                * cfg_sg_seg_cnt data segments.
+                */
+               phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+                       sizeof(struct fcp_rsp) +
+                       ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
+
+               /* Total SGEs for scsi_sg_list */
+               phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
+               /*
+                * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only need
+                * to post 1 page for the SGL.
+                */
        }
 
-       for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
-            dma_buf_size < max_buf_size && buf_size > dma_buf_size;
-            dma_buf_size = dma_buf_size << 1)
-               ;
-       if (dma_buf_size == max_buf_size)
-               phba->cfg_sg_seg_cnt = (dma_buf_size -
-                       sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
-                       (2 * sizeof(struct sli4_sge))) /
-                               sizeof(struct sli4_sge);
-       phba->cfg_sg_dma_buf_size = dma_buf_size;
+       /* Initialize the host templates with the updated values. */
+       lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+       lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+
+       if (phba->cfg_sg_dma_buf_size  <= LPFC_MIN_SG_SLI4_BUF_SZ)
+               phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
+       else
+               phba->cfg_sg_dma_buf_size =
+                       SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
+
+       lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
+                       "9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n",
+                       phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
+                       phba->cfg_total_seg_cnt);
 
        /* Initialize buffer queue management fields */
        hbq_count = lpfc_sli_hbq_count();
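
The SLI-4 path above fixes the SGL budget at two pages for every adapter family instead of the old per-family probing loop. Assuming the usual 4 KB SLI4_PAGE_SIZE and a 16-byte struct sli4_sge (both assumptions, stated here only for illustration), the numbers work out as:

        2 pages x 4096 bytes = 8192 bytes of SGL space
        8192 / 16 bytes per SGE = 512 SGEs per SGL
        512 - 2 reserved (FCP cmnd + FCP rsp) = 510 usable data SGEs

which is why cfg_sg_seg_cnt is clamped against LPFC_MAX_SGL_SEG_CNT - 2 before the buffer size is computed, and why the final cfg_sg_dma_buf_size is rounded up with SLI4_PAGE_ALIGN().
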
@@ -5104,6 +5203,26 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                goto out_free_fcp_eq_hdl;
        }
 
+       phba->sli4_hba.cpu_map = kzalloc((sizeof(struct lpfc_vector_map_info) *
+                                        phba->sli4_hba.num_present_cpu),
+                                        GFP_KERNEL);
+       if (!phba->sli4_hba.cpu_map) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "3327 Failed allocate memory for msi-x "
+                               "interrupt vector mapping\n");
+               rc = -ENOMEM;
+               goto out_free_msix;
+       }
+       /* Initialize io channels for round robin */
+       cpup = phba->sli4_hba.cpu_map;
+       rc = 0;
+       for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+               cpup->channel_id = rc;
+               rc++;
+               if (rc >= phba->cfg_fcp_io_channel)
+                       rc = 0;
+       }
+
        /*
         * Enable sr-iov virtual functions if supported and configured
         * through the module parameter.
@@ -5123,6 +5242,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 
        return 0;
 
+out_free_msix:
+       kfree(phba->sli4_hba.msix_entries);
 out_free_fcp_eq_hdl:
        kfree(phba->sli4_hba.fcp_eq_hdl);
 out_free_fcf_rr_bmask:
@@ -5152,6 +5273,11 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
 {
        struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
 
+       /* Free memory allocated for msi-x interrupt vector to CPU mapping */
+       kfree(phba->sli4_hba.cpu_map);
+       phba->sli4_hba.num_present_cpu = 0;
+       phba->sli4_hba.num_online_cpu = 0;
+
        /* Free memory allocated for msi-x interrupt vector entries */
        kfree(phba->sli4_hba.msix_entries);
 
@@ -5260,8 +5386,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
        init_waitqueue_head(&phba->work_waitq);
 
        /* Initialize the scsi buffer list used by driver for scsi IO */
-       spin_lock_init(&phba->scsi_buf_list_lock);
-       INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
+       spin_lock_init(&phba->scsi_buf_list_get_lock);
+       INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
+       spin_lock_init(&phba->scsi_buf_list_put_lock);
+       INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
 
        /* Initialize the fabric iocb list */
        INIT_LIST_HEAD(&phba->fabric_iocb_list);
@@ -6696,6 +6824,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
        int cfg_fcp_io_channel;
        uint32_t cpu;
        uint32_t i = 0;
+       uint32_t j = 0;
 
 
        /*
@@ -6706,15 +6835,21 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
        /* Sanity check on HBA EQ parameters */
        cfg_fcp_io_channel = phba->cfg_fcp_io_channel;
 
-       /* It doesn't make sense to have more io channels then CPUs */
-       for_each_online_cpu(cpu) {
-               i++;
+       /* It doesn't make sense to have more io channels than online CPUs */
+       for_each_present_cpu(cpu) {
+               if (cpu_online(cpu))
+                       i++;
+               j++;
        }
+       phba->sli4_hba.num_online_cpu = i;
+       phba->sli4_hba.num_present_cpu = j;
+
        if (i < cfg_fcp_io_channel) {
                lpfc_printf_log(phba,
                                KERN_ERR, LOG_INIT,
                                "3188 Reducing IO channels to match number of "
-                               "CPUs: from %d to %d\n", cfg_fcp_io_channel, i);
+                               "online CPUs: from %d to %d\n",
+                               cfg_fcp_io_channel, i);
                cfg_fcp_io_channel = i;
        }
 
@@ -7743,8 +7878,13 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
 
 out:
        /* Catch the not-ready port failure after a port reset. */
-       if (num_resets >= MAX_IF_TYPE_2_RESETS)
+       if (num_resets >= MAX_IF_TYPE_2_RESETS) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "3317 HBA not functional: IP Reset Failed "
+                               "after (%d) retries, try: "
+                               "echo fw_reset > board_mode\n", num_resets);
                rc = -ENODEV;
+       }
 
        return rc;
 }
@@ -8208,6 +8348,269 @@ lpfc_sli_disable_intr(struct lpfc_hba *phba)
        return;
 }
 
+/**
+ * lpfc_find_next_cpu - Find next available CPU that matches the phys_id
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * Find next available CPU to use for IRQ to CPU affinity.
+ */
+static int
+lpfc_find_next_cpu(struct lpfc_hba *phba, uint32_t phys_id)
+{
+       struct lpfc_vector_map_info *cpup;
+       int cpu;
+
+       cpup = phba->sli4_hba.cpu_map;
+       for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
+               /* CPU must be online */
+               if (cpu_online(cpu)) {
+                       if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
+                           (lpfc_used_cpu[cpu] == LPFC_VECTOR_MAP_EMPTY) &&
+                           (cpup->phys_id == phys_id)) {
+                               return cpu;
+                       }
+               }
+               cpup++;
+       }
+
+       /*
+        * If we get here, we have used ALL CPUs for the specific
+        * phys_id. Now we need to clear out lpfc_used_cpu and start
+        * reusing CPUs.
+        */
+
+       for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
+               if (lpfc_used_cpu[cpu] == phys_id)
+                       lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY;
+       }
+
+       cpup = phba->sli4_hba.cpu_map;
+       for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
+               /* CPU must be online */
+               if (cpu_online(cpu)) {
+                       if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
+                           (cpup->phys_id == phys_id)) {
+                               return cpu;
+                       }
+               }
+               cpup++;
+       }
+       return LPFC_VECTOR_MAP_EMPTY;
+}
+
+/**
+ * lpfc_sli4_set_affinity - Set affinity for HBA IRQ vectors
+ * @phba:      pointer to lpfc hba data structure.
+ * @vectors:   number of HBA vectors
+ *
+ * Affinitize MSIX IRQ vectors to CPUs. Try to spread vector affinity
+ * evenly across multiple physical CPUs (NUMA nodes).
+ * In addition, this routine will assign an IO channel for each CPU
+ * to use when issuing I/Os.
+ */
+static int
+lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
+{
+       int i, idx, saved_chann, used_chann, cpu, phys_id;
+       int max_phys_id, num_io_channel, first_cpu;
+       struct lpfc_vector_map_info *cpup;
+#ifdef CONFIG_X86
+       struct cpuinfo_x86 *cpuinfo;
+#endif
+       struct cpumask *mask;
+       uint8_t chann[LPFC_FCP_IO_CHAN_MAX+1];
+
+       /* If there is no mapping, just return */
+       if (!phba->cfg_fcp_cpu_map)
+               return 1;
+
+       /* Init cpu_map array */
+       memset(phba->sli4_hba.cpu_map, 0xff,
+              (sizeof(struct lpfc_vector_map_info) *
+               phba->sli4_hba.num_present_cpu));
+
+       max_phys_id = 0;
+       phys_id = 0;
+       num_io_channel = 0;
+       first_cpu = LPFC_VECTOR_MAP_EMPTY;
+
+       /* Update CPU map with physical id and core id of each CPU */
+       cpup = phba->sli4_hba.cpu_map;
+       for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
+#ifdef CONFIG_X86
+               cpuinfo = &cpu_data(cpu);
+               cpup->phys_id = cpuinfo->phys_proc_id;
+               cpup->core_id = cpuinfo->cpu_core_id;
+#else
+               /* No distinction between CPUs for other platforms */
+               cpup->phys_id = 0;
+               cpup->core_id = 0;
+#endif
+
+               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                               "3328 CPU physid %d coreid %d\n",
+                               cpup->phys_id, cpup->core_id);
+
+               if (cpup->phys_id > max_phys_id)
+                       max_phys_id = cpup->phys_id;
+               cpup++;
+       }
+
+       /* Now associate the HBA vectors with specific CPUs */
+       for (idx = 0; idx < vectors; idx++) {
+               cpup = phba->sli4_hba.cpu_map;
+               cpu = lpfc_find_next_cpu(phba, phys_id);
+               if (cpu == LPFC_VECTOR_MAP_EMPTY) {
+
+                       /* Try for all phys_id's */
+                       for (i = 1; i < max_phys_id; i++) {
+                               phys_id++;
+                               if (phys_id > max_phys_id)
+                                       phys_id = 0;
+                               cpu = lpfc_find_next_cpu(phba, phys_id);
+                               if (cpu == LPFC_VECTOR_MAP_EMPTY)
+                                       continue;
+                               goto found;
+                       }
+
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "3329 Cannot set affinity: "
+                                       "Error mapping vector %d (%d)\n",
+                                       idx, vectors);
+                       return 0;
+               }
+found:
+               cpup += cpu;
+               if (phba->cfg_fcp_cpu_map == LPFC_DRIVER_CPU_MAP)
+                       lpfc_used_cpu[cpu] = phys_id;
+
+               /* Associate vector with selected CPU */
+               cpup->irq = phba->sli4_hba.msix_entries[idx].vector;
+
+               /* Associate IO channel with selected CPU */
+               cpup->channel_id = idx;
+               num_io_channel++;
+
+               if (first_cpu == LPFC_VECTOR_MAP_EMPTY)
+                       first_cpu = cpu;
+
+               /* Now affinitize to the selected CPU */
+               mask = &cpup->maskbits;
+               cpumask_clear(mask);
+               cpumask_set_cpu(cpu, mask);
+               i = irq_set_affinity_hint(phba->sli4_hba.msix_entries[idx].
+                                         vector, mask);
+
+               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                               "3330 Set Affinity: CPU %d channel %d "
+                               "irq %d (%x)\n",
+                               cpu, cpup->channel_id,
+                               phba->sli4_hba.msix_entries[idx].vector, i);
+
+               /* Spread vector mapping across multiple physical CPU nodes */
+               phys_id++;
+               if (phys_id > max_phys_id)
+                       phys_id = 0;
+       }
+
+       /*
+        * Finally fill in the IO channel for any remaining CPUs.
+        * At this point, all IO channels have been assigned to a specific
+        * MSIx vector, mapped to a specific CPU.
+        * Base the remaining IO channel assignments on IO channels already
+        * assigned to other CPUs on the same phys_id.
+        */
+       for (i = 0; i <= max_phys_id; i++) {
+               /*
+                * If there are no io channels already mapped to
+                * this phys_id, just round robin thru the io_channels.
+                * Setup chann[] for round robin.
+                */
+               for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
+                       chann[idx] = idx;
+
+               saved_chann = 0;
+               used_chann = 0;
+
+               /*
+                * First build a list of IO channels already assigned
+                * to this phys_id before reassigning the same IO
+                * channels to the remaining CPUs.
+                */
+               cpup = phba->sli4_hba.cpu_map;
+               cpu = first_cpu;
+               cpup += cpu;
+               for (idx = 0; idx < phba->sli4_hba.num_present_cpu;
+                    idx++) {
+                       if (cpup->phys_id == i) {
+                               /*
+                                * Save any IO channels that are
+                                * already mapped to this phys_id.
+                                */
+                               if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) {
+                                       chann[saved_chann] =
+                                               cpup->channel_id;
+                                       saved_chann++;
+                                       goto out;
+                               }
+
+                               /* See if we are using round-robin */
+                               if (saved_chann == 0)
+                                       saved_chann =
+                                               phba->cfg_fcp_io_channel;
+
+                               /* Associate next IO channel with CPU */
+                               cpup->channel_id = chann[used_chann];
+                               num_io_channel++;
+                               used_chann++;
+                               if (used_chann == saved_chann)
+                                       used_chann = 0;
+
+                               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                                               "3331 Set IO_CHANN "
+                                               "CPU %d channel %d\n",
+                                               idx, cpup->channel_id);
+                       }
+out:
+                       cpu++;
+                       if (cpu >= phba->sli4_hba.num_present_cpu) {
+                               cpup = phba->sli4_hba.cpu_map;
+                               cpu = 0;
+                       } else {
+                               cpup++;
+                       }
+               }
+       }
+
+       if (phba->sli4_hba.num_online_cpu != phba->sli4_hba.num_present_cpu) {
+               cpup = phba->sli4_hba.cpu_map;
+               for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
+                       if (cpup->channel_id == LPFC_VECTOR_MAP_EMPTY) {
+                               cpup->channel_id = 0;
+                               num_io_channel++;
+
+                               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                                               "3332 Assign IO_CHANN "
+                                               "CPU %d channel %d\n",
+                                               idx, cpup->channel_id);
+                       }
+                       cpup++;
+               }
+       }
+
+       /* Sanity check */
+       if (num_io_channel != phba->sli4_hba.num_present_cpu)
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "3333 Set affinity mismatch: "
+                               "%d chann != %d cpus: %d vectors\n",
+                               num_io_channel, phba->sli4_hba.num_present_cpu,
+                               vectors);
+
+       phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU;
+       return 1;
+}
+
+
 /**
  * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
  * @phba: pointer to lpfc hba data structure.
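
lpfc_sli4_set_affinity() walks the present CPUs, records each one's physical package and core id, pins one MSI-X vector per selected CPU while rotating across packages, and finally gives every CPU that did not receive a vector an IO channel of its own. Both the initial cpu_map setup earlier in this patch and the fallback at the end reduce to a simple round-robin over the configured channels; a condensed sketch of that step (illustrative only, not the driver's exact code):

        /* Hand each present CPU an IO channel, cycling through the
         * configured channels (mirrors the round-robin fallback above).
         */
        static void lpfc_assign_channels_rr_sketch(
                        struct lpfc_vector_map_info *map,
                        int num_cpu, int num_channels)
        {
                int cpu, chan = 0;

                for (cpu = 0; cpu < num_cpu; cpu++) {
                        map[cpu].channel_id = chan;
                        if (++chan >= num_channels)
                                chan = 0;
                }
        }
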
@@ -8259,9 +8662,7 @@ enable_msix_vectors:
                                phba->sli4_hba.msix_entries[index].vector,
                                phba->sli4_hba.msix_entries[index].entry);
 
-       /*
-        * Assign MSI-X vectors to interrupt handlers
-        */
+       /* Assign MSI-X vectors to interrupt handlers */
        for (index = 0; index < vectors; index++) {
                memset(&phba->sli4_hba.handler_name[index], 0, 16);
                sprintf((char *)&phba->sli4_hba.handler_name[index],
@@ -8289,6 +8690,8 @@ enable_msix_vectors:
                                phba->cfg_fcp_io_channel, vectors);
                phba->cfg_fcp_io_channel = vectors;
        }
+
+       lpfc_sli4_set_affinity(phba, vectors);
        return rc;
 
 cfg_fail_out:
@@ -9213,15 +9616,15 @@ lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
        /* Block all SCSI devices' I/Os on the host */
        lpfc_scsi_dev_block(phba);
 
+       /* Flush all driver's outstanding SCSI I/Os as we are to reset */
+       lpfc_sli_flush_fcp_rings(phba);
+
        /* stop all timers */
        lpfc_stop_hba_timers(phba);
 
        /* Disable interrupt and pci device */
        lpfc_sli_disable_intr(phba);
        pci_disable_device(phba->pcidev);
-
-       /* Flush all driver's outstanding SCSI I/Os as we are to reset */
-       lpfc_sli_flush_fcp_rings(phba);
 }
 
 /**
@@ -9966,6 +10369,9 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
        /* Block all SCSI devices' I/Os on the host */
        lpfc_scsi_dev_block(phba);
 
+       /* Flush all driver's outstanding SCSI I/Os as we are to reset */
+       lpfc_sli_flush_fcp_rings(phba);
+
        /* stop all timers */
        lpfc_stop_hba_timers(phba);
 
@@ -9973,9 +10379,6 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
        lpfc_sli4_disable_intr(phba);
        lpfc_sli4_queue_destroy(phba);
        pci_disable_device(phba->pcidev);
-
-       /* Flush all driver's outstanding SCSI I/Os as we are to reset */
-       lpfc_sli_flush_fcp_rings(phba);
 }
 
 /**
@@ -10535,6 +10938,7 @@ static struct miscdevice lpfc_mgmt_dev = {
 static int __init
 lpfc_init(void)
 {
+       int cpu;
        int error = 0;
 
        printk(LPFC_MODULE_DESC "\n");
@@ -10561,6 +10965,11 @@ lpfc_init(void)
                        return -ENOMEM;
                }
        }
+
+       /* Initialize in case vector mapping is needed */
+       for (cpu = 0; cpu < LPFC_MAX_CPU; cpu++)
+               lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY;
+
        error = pci_register_driver(&lpfc_driver);
        if (error) {
                fc_release_transport(lpfc_transport_template);
index baf53e6c2bd15bfbbf8c25af5face63f1c9fe006..2a4e5d21eab2ad3c10f45dd7bece5c5a7ae02e06 100644 (file)
@@ -37,6 +37,7 @@
 #define LOG_EVENT      0x00010000      /* CT,TEMP,DUMP, logging */
 #define LOG_FIP                0x00020000      /* FIP events */
 #define LOG_FCP_UNDER  0x00040000      /* FCP underruns errors */
+#define LOG_SCSI_CMD   0x00080000      /* ALL SCSI commands */
 #define LOG_ALL_MSG    0xffffffff      /* LOG all messages */
 
 #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
index a7a9fa468308bda8ddaa97d196fa23ce75133824..41363db7d42628b9af9ec5e1a1ab7f927ab6aff9 100644 (file)
@@ -2149,18 +2149,21 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
 
        /* Only FC supports upd bit */
        if ((phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) &&
-           (vport->fc_flag & FC_VFI_REGISTERED)) {
+           (vport->fc_flag & FC_VFI_REGISTERED) &&
+           (!phba->fc_topology_changed)) {
                bf_set(lpfc_reg_vfi_vp, reg_vfi, 0);
                bf_set(lpfc_reg_vfi_upd, reg_vfi, 1);
        }
        lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX,
                        "3134 Register VFI, mydid:x%x, fcfi:%d, "
-                       " vfi:%d, vpi:%d, fc_pname:%x%x\n",
+                       " vfi:%d, vpi:%d, fc_pname:%x%x fc_flag:x%x"
+                       " port_state:x%x topology chg:%d\n",
                        vport->fc_myDID,
                        phba->fcf.fcfi,
                        phba->sli4_hba.vfi_ids[vport->vfi],
                        phba->vpi_ids[vport->vpi],
-                       reg_vfi->wwn[0], reg_vfi->wwn[1]);
+                       reg_vfi->wwn[0], reg_vfi->wwn[1], vport->fc_flag,
+                       vport->port_state, phba->fc_topology_changed);
 }
 
 /**
index cd86069a0ba82a2954238139e8626283fac56d07..812d0cd7c86dcc8c9c3fe86f3ff6b8079ba850ab 100644 (file)
@@ -64,18 +64,26 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
        struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
        int i;
 
-       if (phba->sli_rev == LPFC_SLI_REV4)
+       if (phba->sli_rev == LPFC_SLI_REV4) {
+               /* Calculate alignment */
+               if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
+                       i = phba->cfg_sg_dma_buf_size;
+               else
+                       i = SLI4_PAGE_SIZE;
+
                phba->lpfc_scsi_dma_buf_pool =
                        pci_pool_create("lpfc_scsi_dma_buf_pool",
                                phba->pcidev,
                                phba->cfg_sg_dma_buf_size,
-                               phba->cfg_sg_dma_buf_size,
+                               i,
                                0);
-       else
+       } else {
                phba->lpfc_scsi_dma_buf_pool =
                        pci_pool_create("lpfc_scsi_dma_buf_pool",
                                phba->pcidev, phba->cfg_sg_dma_buf_size,
                                align, 0);
+       }
+
        if (!phba->lpfc_scsi_dma_buf_pool)
                goto fail;
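
The alignment passed to pci_pool_create() matters for SLI-4 because lpfc_new_scsi_buf_s4(), later in this merge, discards any buffer that does not start on an SLI4_PAGE_SIZE boundary: an SGL that straddled a page would not be usable by the port. The hunk above therefore aligns on the buffer size for sub-page buffers and on one SLI-4 page otherwise. A hedged sketch of that allocation choice (helper name is illustrative):

        /* Create a DMA pool whose blocks are page aligned whenever the
         * buffer is at least a page long (sketch of the logic above).
         */
        static struct pci_pool *lpfc_buf_pool_sketch(struct pci_dev *pdev,
                                                     size_t buf_size)
        {
                size_t align = buf_size < SLI4_PAGE_SIZE ?
                               buf_size : SLI4_PAGE_SIZE;

                return pci_pool_create("lpfc_scsi_dma_buf_pool", pdev,
                                       buf_size, align, 0);
        }
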
 
index 82f4d3542289f6a8fab4cc4f99a5b5d3c2604cf1..31e9b92f5a9bebc181e0fffec874d95c94dac6e9 100644 (file)
@@ -332,9 +332,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 
        /* PLOGI chkparm OK */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
-                        "0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
+                        "0114 PLOGI chkparm OK Data: x%x x%x x%x "
+                        "x%x x%x x%x\n",
                         ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
-                        ndlp->nlp_rpi);
+                        ndlp->nlp_rpi, vport->port_state,
+                        vport->fc_flag);
 
        if (vport->cfg_fcp_class == 2 && sp->cls2.classValid)
                ndlp->nlp_fcp_info |= CLASS2;
@@ -574,7 +576,7 @@ out:
        lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
 
        /* 1 sec timeout */
-       mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+       mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
 
        spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag |= NLP_DELAY_TMO;
@@ -631,7 +633,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                         * If there are other active VLinks present,
                         * re-instantiate the Vlink using FDISC.
                         */
-                       mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+                       mod_timer(&ndlp->nlp_delayfunc,
+                                 jiffies + msecs_to_jiffies(1000));
                        spin_lock_irq(shost->host_lock);
                        ndlp->nlp_flag |= NLP_DELAY_TMO;
                        spin_unlock_irq(shost->host_lock);
@@ -648,7 +651,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
                (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
                /* Only try to re-login if this is NOT a Fabric Node */
-               mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
+               mod_timer(&ndlp->nlp_delayfunc,
+                         jiffies + msecs_to_jiffies(1000 * 1));
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag |= NLP_DELAY_TMO;
                spin_unlock_irq(shost->host_lock);
@@ -969,7 +973,7 @@ lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        }
 
        /* Put ndlp in npr state set plogi timer for 1 sec */
-       mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
+       mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1));
        spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag |= NLP_DELAY_TMO;
        spin_unlock_irq(shost->host_lock);
@@ -1303,7 +1307,8 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
        if ((irsp->ulpStatus) ||
            (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
                /* 1 sec timeout */
-               mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+               mod_timer(&ndlp->nlp_delayfunc,
+                         jiffies + msecs_to_jiffies(1000));
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag |= NLP_DELAY_TMO;
                spin_unlock_irq(shost->host_lock);
@@ -1509,7 +1514,8 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
                }
 
                /* Put ndlp in npr state set plogi timer for 1 sec */
-               mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
+               mod_timer(&ndlp->nlp_delayfunc,
+                         jiffies + msecs_to_jiffies(1000 * 1));
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag |= NLP_DELAY_TMO;
                spin_unlock_irq(shost->host_lock);
@@ -2145,7 +2151,8 @@ lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
 
        if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
-               mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
+               mod_timer(&ndlp->nlp_delayfunc,
+                         jiffies + msecs_to_jiffies(1000 * 1));
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag |= NLP_DELAY_TMO;
                ndlp->nlp_flag &= ~NLP_NPR_ADISC;
index 74b8710e1e90499eadcbfc72580b12a4c223c18e..8523b278ec9daf1c2d37ffedf08335b32701cb53 100644 (file)
@@ -24,6 +24,8 @@
 #include <linux/export.h>
 #include <linux/delay.h>
 #include <asm/unaligned.h>
+#include <linux/crc-t10dif.h>
+#include <net/checksum.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
@@ -48,7 +50,7 @@
 #define LPFC_RESET_WAIT  2
 #define LPFC_ABORT_WAIT  2
 
-int _dump_buf_done;
+int _dump_buf_done = 1;
 
 static char *dif_op_str[] = {
        "PROT_NORMAL",
@@ -66,6 +68,10 @@ struct scsi_dif_tuple {
        __be32 ref_tag;         /* Target LBA or indirect LBA */
 };
 
+#if !defined(SCSI_PROT_GUARD_CHECK) || !defined(SCSI_PROT_REF_CHECK)
+#define scsi_prot_flagged(sc, flg)     sc
+#endif
+
 static void
 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
 static void
@@ -534,7 +540,16 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
        dma_addr_t pdma_phys_fcp_rsp;
        dma_addr_t pdma_phys_bpl;
        uint16_t iotag;
-       int bcnt;
+       int bcnt, bpl_size;
+
+       bpl_size = phba->cfg_sg_dma_buf_size -
+               (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+
+       lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+                        "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
+                        num_to_alloc, phba->cfg_sg_dma_buf_size,
+                        (int)sizeof(struct fcp_cmnd),
+                        (int)sizeof(struct fcp_rsp), bpl_size);
 
        for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
                psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
@@ -759,7 +774,7 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
                             struct list_head *post_sblist, int sb_count)
 {
        struct lpfc_scsi_buf *psb, *psb_next;
-       int status;
+       int status, sgl_size;
        int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
        dma_addr_t pdma_phys_bpl1;
        int last_xritag = NO_XRI;
@@ -771,6 +786,9 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
        if (sb_count <= 0)
                return -EINVAL;
 
+       sgl_size = phba->cfg_sg_dma_buf_size -
+               (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+
        list_for_each_entry_safe(psb, psb_next, post_sblist, list) {
                list_del_init(&psb->list);
                block_cnt++;
@@ -803,7 +821,7 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
                                post_cnt = block_cnt;
                        } else if (block_cnt == 1) {
                                /* last single sgl with non-contiguous xri */
-                               if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
+                               if (sgl_size > SGL_PAGE_SIZE)
                                        pdma_phys_bpl1 = psb->dma_phys_bpl +
                                                                SGL_PAGE_SIZE;
                                else
@@ -885,9 +903,12 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
        int num_posted, rc = 0;
 
        /* get all SCSI buffers need to repost to a local list */
-       spin_lock_irq(&phba->scsi_buf_list_lock);
-       list_splice_init(&phba->lpfc_scsi_buf_list, &post_sblist);
-       spin_unlock_irq(&phba->scsi_buf_list_lock);
+       spin_lock_irq(&phba->scsi_buf_list_get_lock);
+       spin_lock_irq(&phba->scsi_buf_list_put_lock);
+       list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist);
+       list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist);
+       spin_unlock_irq(&phba->scsi_buf_list_put_lock);
+       spin_unlock_irq(&phba->scsi_buf_list_get_lock);
 
        /* post the list of scsi buffer sgls to port if available */
        if (!list_empty(&post_sblist)) {
@@ -923,13 +944,22 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
        IOCB_t *iocb;
        dma_addr_t pdma_phys_fcp_cmd;
        dma_addr_t pdma_phys_fcp_rsp;
-       dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
+       dma_addr_t pdma_phys_bpl;
        uint16_t iotag, lxri = 0;
-       int bcnt, num_posted;
+       int bcnt, num_posted, sgl_size;
        LIST_HEAD(prep_sblist);
        LIST_HEAD(post_sblist);
        LIST_HEAD(scsi_sblist);
 
+       sgl_size = phba->cfg_sg_dma_buf_size -
+               (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+
+       lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+                        "9068 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
+                        num_to_alloc, phba->cfg_sg_dma_buf_size, sgl_size,
+                        (int)sizeof(struct fcp_cmnd),
+                        (int)sizeof(struct fcp_rsp));
+
        for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
                psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
                if (!psb)
@@ -948,6 +978,15 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
                }
                memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
 
+               /* Page alignment is CRITICAL, double check to be sure */
+               if (((unsigned long)(psb->data) &
+                   (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0) {
+                       pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
+                                     psb->data, psb->dma_handle);
+                       kfree(psb);
+                       break;
+               }
+
                /* Allocate iotag for psb->cur_iocbq. */
                iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
                if (iotag == 0) {
@@ -968,17 +1007,14 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
                psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
                psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
                psb->fcp_bpl = psb->data;
-               psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size)
-                       - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+               psb->fcp_cmnd = (psb->data + sgl_size);
                psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
                                        sizeof(struct fcp_cmnd));
 
                /* Initialize local short-hand pointers. */
                sgl = (struct sli4_sge *)psb->fcp_bpl;
                pdma_phys_bpl = psb->dma_handle;
-               pdma_phys_fcp_cmd =
-                       (psb->dma_handle + phba->cfg_sg_dma_buf_size)
-                        - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+               pdma_phys_fcp_cmd = (psb->dma_handle + sgl_size);
                pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
 
                /*
@@ -1020,17 +1056,13 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
                iocb->ulpLe = 1;
                iocb->ulpClass = CLASS3;
                psb->cur_iocbq.context1 = psb;
-               if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
-                       pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
-               else
-                       pdma_phys_bpl1 = 0;
                psb->dma_phys_bpl = pdma_phys_bpl;
 
                /* add the scsi buffer to a post list */
                list_add_tail(&psb->list, &post_sblist);
-               spin_lock_irq(&phba->scsi_buf_list_lock);
+               spin_lock_irq(&phba->scsi_buf_list_get_lock);
                phba->sli4_hba.scsi_xri_cnt++;
-               spin_unlock_irq(&phba->scsi_buf_list_lock);
+               spin_unlock_irq(&phba->scsi_buf_list_get_lock);
        }
        lpfc_printf_log(phba, KERN_INFO, LOG_BG,
                        "3021 Allocate %d out of %d requested new SCSI "
@@ -1079,17 +1111,23 @@ static struct lpfc_scsi_buf*
 lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 {
        struct  lpfc_scsi_buf * lpfc_cmd = NULL;
-       struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
-       unsigned long iflag = 0;
-
-       spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
-       list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
-       if (lpfc_cmd) {
-               lpfc_cmd->seg_cnt = 0;
-               lpfc_cmd->nonsg_phys = 0;
-               lpfc_cmd->prot_seg_cnt = 0;
+       struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
+       unsigned long gflag = 0;
+       unsigned long pflag = 0;
+
+       spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag);
+       list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf,
+                        list);
+       if (!lpfc_cmd) {
+               spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag);
+               list_splice(&phba->lpfc_scsi_buf_list_put,
+                           &phba->lpfc_scsi_buf_list_get);
+               INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
+               list_remove_head(scsi_buf_list_get, lpfc_cmd,
+                                struct lpfc_scsi_buf, list);
+               spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag);
        }
-       spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
+       spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag);
        return  lpfc_cmd;
 }
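
For reference, a minimal userspace sketch of the get/put two-list pattern these hunks introduce (invented names, pthread mutexes standing in for the driver's spinlocks): commands are allocated from a "get" list, completed buffers are returned to a "put" list under its own lock, and the put list is spliced back only when the get list runs dry, so the release path never contends with the allocation-path lock.

#include <pthread.h>
#include <stddef.h>

struct buf { struct buf *next; };

static struct buf *get_list, *put_list;
static pthread_mutex_t get_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t put_lock = PTHREAD_MUTEX_INITIALIZER;

/* Allocation path: lock order is get_lock, then put_lock (refill only). */
struct buf *buf_get(void)
{
        struct buf *b;

        pthread_mutex_lock(&get_lock);
        if (!get_list) {                        /* get list empty: steal the put list */
                pthread_mutex_lock(&put_lock);
                get_list = put_list;
                put_list = NULL;
                pthread_mutex_unlock(&put_lock);
        }
        b = get_list;
        if (b)
                get_list = b->next;
        pthread_mutex_unlock(&get_lock);
        return b;                               /* NULL when both lists are empty */
}

/* Release path: never touches get_lock, so it cannot stall allocations. */
void buf_put(struct buf *b)
{
        pthread_mutex_lock(&put_lock);
        b->next = put_list;
        put_list = b;
        pthread_mutex_unlock(&put_lock);
}
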
 /**
@@ -1107,28 +1145,39 @@ static struct lpfc_scsi_buf*
 lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 {
        struct lpfc_scsi_buf *lpfc_cmd ;
-       unsigned long iflag = 0;
+       unsigned long gflag = 0;
+       unsigned long pflag = 0;
        int found = 0;
 
-       spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
-       list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list,
-                                                       list) {
+       spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag);
+       list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list_get, list) {
                if (lpfc_test_rrq_active(phba, ndlp,
                                         lpfc_cmd->cur_iocbq.sli4_lxritag))
                        continue;
                list_del(&lpfc_cmd->list);
                found = 1;
-               lpfc_cmd->seg_cnt = 0;
-               lpfc_cmd->nonsg_phys = 0;
-               lpfc_cmd->prot_seg_cnt = 0;
                break;
        }
-       spin_unlock_irqrestore(&phba->scsi_buf_list_lock,
-                                                iflag);
+       if (!found) {
+               spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag);
+               list_splice(&phba->lpfc_scsi_buf_list_put,
+                           &phba->lpfc_scsi_buf_list_get);
+               INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
+               spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag);
+               list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list_get,
+                                   list) {
+                       if (lpfc_test_rrq_active(
+                               phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag))
+                               continue;
+                       list_del(&lpfc_cmd->list);
+                       found = 1;
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag);
        if (!found)
                return NULL;
-       else
-               return  lpfc_cmd;
+       return  lpfc_cmd;
 }
 /**
  * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
@@ -1160,10 +1209,15 @@ lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
 {
        unsigned long iflag = 0;
 
-       spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
+       psb->seg_cnt = 0;
+       psb->nonsg_phys = 0;
+       psb->prot_seg_cnt = 0;
+
+       spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
        psb->pCmd = NULL;
-       list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
-       spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
+       psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
+       list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
+       spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
 }
 
 /**
@@ -1181,6 +1235,10 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
 {
        unsigned long iflag = 0;
 
+       psb->seg_cnt = 0;
+       psb->nonsg_phys = 0;
+       psb->prot_seg_cnt = 0;
+
        if (psb->exch_busy) {
                spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
                                        iflag);
@@ -1190,11 +1248,11 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
                spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
                                        iflag);
        } else {
-
-               spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
                psb->pCmd = NULL;
-               list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
-               spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
+               psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
+               spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
+               list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
+               spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
        }
 }
 
@@ -1268,6 +1326,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
                               "dma_map_sg.  Config %d, seg_cnt %d\n",
                               __func__, phba->cfg_sg_seg_cnt,
                               lpfc_cmd->seg_cnt);
+                       lpfc_cmd->seg_cnt = 0;
                        scsi_dma_unmap(scsi_cmnd);
                        return 1;
                }
@@ -2013,9 +2072,21 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
        bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
        bf_set(pde6_optx, pde6, txop);
        bf_set(pde6_oprx, pde6, rxop);
+
+       /*
+        * We only need to check the data on READs; for WRITEs,
+        * protection data is automatically generated, not checked.
+        */
        if (datadir == DMA_FROM_DEVICE) {
-               bf_set(pde6_ce, pde6, checking);
-               bf_set(pde6_re, pde6, checking);
+               if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK))
+                       bf_set(pde6_ce, pde6, checking);
+               else
+                       bf_set(pde6_ce, pde6, 0);
+
+               if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
+                       bf_set(pde6_re, pde6, checking);
+               else
+                       bf_set(pde6_re, pde6, 0);
        }
        bf_set(pde6_ai, pde6, 1);
        bf_set(pde6_ae, pde6, 0);
@@ -2145,6 +2216,10 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 
        split_offset = 0;
        do {
+               /* Check to see if we ran out of space */
+               if (num_bde >= (phba->cfg_total_seg_cnt - 2))
+                       return num_bde + 3;
+
                /* setup PDE5 with what we have */
                pde5 = (struct lpfc_pde5 *) bpl;
                memset(pde5, 0, sizeof(struct lpfc_pde5));
@@ -2164,8 +2239,17 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
                bf_set(pde6_optx, pde6, txop);
                bf_set(pde6_oprx, pde6, rxop);
-               bf_set(pde6_ce, pde6, checking);
-               bf_set(pde6_re, pde6, checking);
+
+               if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK))
+                       bf_set(pde6_ce, pde6, checking);
+               else
+                       bf_set(pde6_ce, pde6, 0);
+
+               if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
+                       bf_set(pde6_re, pde6, checking);
+               else
+                       bf_set(pde6_re, pde6, 0);
+
                bf_set(pde6_ai, pde6, 1);
                bf_set(pde6_ae, pde6, 0);
                bf_set(pde6_apptagval, pde6, 0);
@@ -2213,6 +2297,10 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                pgdone = 0;
                subtotal = 0; /* total bytes processed for current prot grp */
                while (!pgdone) {
+                       /* Check to see if we ran out of space */
+                       if (num_bde >= phba->cfg_total_seg_cnt)
+                               return num_bde + 1;
+
                        if (!sgde) {
                                lpfc_printf_log(phba, KERN_ERR, LOG_BG,
                                        "9065 BLKGRD:%s Invalid data segment\n",
@@ -2324,7 +2412,6 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
        struct sli4_sge_diseed *diseed = NULL;
        dma_addr_t physaddr;
        int i = 0, num_sge = 0, status;
-       int datadir = sc->sc_data_direction;
        uint32_t reftag;
        unsigned blksize;
        uint8_t txop, rxop;
@@ -2362,13 +2449,26 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
        diseed->ref_tag = cpu_to_le32(reftag);
        diseed->ref_tag_tran = diseed->ref_tag;
 
+       /*
+        * We only need to check the data on READs; for WRITEs,
+        * protection data is automatically generated, not checked.
+        */
+       if (sc->sc_data_direction == DMA_FROM_DEVICE) {
+               if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK))
+                       bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
+               else
+                       bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
+
+               if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
+                       bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
+               else
+                       bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
+       }
+
        /* setup DISEED with the rest of the info */
        bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
        bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
-       if (datadir == DMA_FROM_DEVICE) {
-               bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
-               bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
-       }
+
        bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
        bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
 
@@ -2497,6 +2597,10 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 
        split_offset = 0;
        do {
+               /* Check to see if we ran out of space */
+               if (num_sge >= (phba->cfg_total_seg_cnt - 2))
+                       return num_sge + 3;
+
                /* setup DISEED with what we have */
                diseed = (struct sli4_sge_diseed *) sgl;
                memset(diseed, 0, sizeof(struct sli4_sge_diseed));
@@ -2506,11 +2610,34 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                diseed->ref_tag = cpu_to_le32(reftag);
                diseed->ref_tag_tran = diseed->ref_tag;
 
+               if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK)) {
+                       bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
+
+               } else {
+                       bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
+                       /*
+                        * When in this mode, the hardware will replace
+                        * the guard tag from the host with a
+                        * newly generated good CRC for the wire.
+                        * Switch to raw mode here to avoid this
+                        * behavior. What the host sends gets put on the wire.
+                        */
+                       if (txop == BG_OP_IN_CRC_OUT_CRC) {
+                               txop = BG_OP_RAW_MODE;
+                               rxop = BG_OP_RAW_MODE;
+                       }
+               }
+
+
+               if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK))
+                       bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
+               else
+                       bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
+
                /* setup DISEED with the rest of the info */
                bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
                bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
-               bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
-               bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
+
                bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
                bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
 
@@ -2556,6 +2683,10 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
                pgdone = 0;
                subtotal = 0; /* total bytes processed for current prot grp */
                while (!pgdone) {
+                       /* Check to see if we ran out of space */
+                       if (num_sge >= phba->cfg_total_seg_cnt)
+                               return num_sge + 1;
+
                        if (!sgde) {
                                lpfc_printf_log(phba, KERN_ERR, LOG_BG,
                                        "9086 BLKGRD:%s Invalid data segment\n",
@@ -2669,6 +2800,47 @@ lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
        return ret;
 }
 
+/**
+ * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be adjusted.
+ *
+ * Adjust the data length to account for how much data
+ * is actually on the wire.
+ *
+ * returns the adjusted data length
+ **/
+static int
+lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
+                      struct lpfc_scsi_buf *lpfc_cmd)
+{
+       struct scsi_cmnd *sc = lpfc_cmd->pCmd;
+       int fcpdl;
+
+       fcpdl = scsi_bufflen(sc);
+
+       /* Check if there is protection data on the wire */
+       if (sc->sc_data_direction == DMA_FROM_DEVICE) {
+               /* Read */
+               if (scsi_get_prot_op(sc) ==  SCSI_PROT_READ_INSERT)
+                       return fcpdl;
+
+       } else {
+               /* Write */
+               if (scsi_get_prot_op(sc) ==  SCSI_PROT_WRITE_STRIP)
+                       return fcpdl;
+       }
+
+       /*
+        * If we are in DIF Type 1 mode, every data block has an 8 byte
+        * DIF (trailer) attached to it. Must adjust the FCP data length.
+        */
+       if (scsi_prot_flagged(sc, SCSI_PROT_TRANSFER_PI))
+               fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;
+
+       return fcpdl;
+}
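
As a worked example of the adjustment above (assuming 512-byte logical blocks): a 32768-byte READ with protection data on the wire yields fcpdl = 32768 + (32768 / 512) * 8 = 32768 + 512 = 33280 bytes, i.e. one 8-byte DIF tuple per data block is added to the FCP data length.
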
+
 /**
  * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
  * @phba: The Hba for which this call is being executed.
@@ -2689,8 +2861,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
        uint32_t num_bde = 0;
        int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
        int prot_group_type = 0;
-       int diflen, fcpdl;
-       unsigned blksize;
+       int fcpdl;
 
        /*
         * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
@@ -2711,28 +2882,28 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
                        return 1;
 
                lpfc_cmd->seg_cnt = datasegcnt;
-               if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                                       "9067 BLKGRD: %s: Too many sg segments"
-                                       " from dma_map_sg.  Config %d, seg_cnt"
-                                       " %d\n",
-                                       __func__, phba->cfg_sg_seg_cnt,
-                                       lpfc_cmd->seg_cnt);
-                       scsi_dma_unmap(scsi_cmnd);
-                       return 1;
-               }
+
+               /* First check if data segment count from SCSI Layer is good */
+               if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
+                       goto err;
 
                prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
 
                switch (prot_group_type) {
                case LPFC_PG_TYPE_NO_DIF:
+
+                       /* Here we need to add a PDE5 and PDE6 to the count */
+                       if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt)
+                               goto err;
+
                        num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
                                        datasegcnt);
                        /* we should have 2 or more entries in buffer list */
                        if (num_bde < 2)
                                goto err;
                        break;
-               case LPFC_PG_TYPE_DIF_BUF:{
+
+               case LPFC_PG_TYPE_DIF_BUF:
                        /*
                         * This type indicates that protection buffers are
                         * passed to the driver, so that needs to be prepared
@@ -2747,31 +2918,28 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
                        }
 
                        lpfc_cmd->prot_seg_cnt = protsegcnt;
-                       if (lpfc_cmd->prot_seg_cnt
-                           > phba->cfg_prot_sg_seg_cnt) {
-                               lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                                       "9068 BLKGRD: %s: Too many prot sg "
-                                       "segments from dma_map_sg.  Config %d,"
-                                               "prot_seg_cnt %d\n", __func__,
-                                               phba->cfg_prot_sg_seg_cnt,
-                                               lpfc_cmd->prot_seg_cnt);
-                               dma_unmap_sg(&phba->pcidev->dev,
-                                            scsi_prot_sglist(scsi_cmnd),
-                                            scsi_prot_sg_count(scsi_cmnd),
-                                            datadir);
-                               scsi_dma_unmap(scsi_cmnd);
-                               return 1;
-                       }
+
+                       /*
+                        * There is a minimum of 4 BPLs used for every
+                        * protection data segment.
+                        */
+                       if ((lpfc_cmd->prot_seg_cnt * 4) >
+                           (phba->cfg_total_seg_cnt - 2))
+                               goto err;
 
                        num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
                                        datasegcnt, protsegcnt);
                        /* we should have 3 or more entries in buffer list */
-                       if (num_bde < 3)
+                       if ((num_bde < 3) ||
+                           (num_bde > phba->cfg_total_seg_cnt))
                                goto err;
                        break;
-               }
+
                case LPFC_PG_TYPE_INVALID:
                default:
+                       scsi_dma_unmap(scsi_cmnd);
+                       lpfc_cmd->seg_cnt = 0;
+
                        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
                                        "9022 Unexpected protection group %i\n",
                                        prot_group_type);
@@ -2790,18 +2958,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
        iocb_cmd->ulpBdeCount = 1;
        iocb_cmd->ulpLe = 1;
 
-       fcpdl = scsi_bufflen(scsi_cmnd);
-
-       if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) {
-               /*
-                * We are in DIF Type 1 mode
-                * Every data block has a 8 byte DIF (trailer)
-                * attached to it.  Must ajust FCP data length
-                */
-               blksize = lpfc_cmd_blksize(scsi_cmnd);
-               diflen = (fcpdl / blksize) * 8;
-               fcpdl += diflen;
-       }
+       fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
        fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
 
        /*
@@ -2812,13 +2969,233 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
 
        return 0;
 err:
+       if (lpfc_cmd->seg_cnt)
+               scsi_dma_unmap(scsi_cmnd);
+       if (lpfc_cmd->prot_seg_cnt)
+               dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
+                            scsi_prot_sg_count(scsi_cmnd),
+                            scsi_cmnd->sc_data_direction);
+
        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-                       "9023 Could not setup all needed BDE's"
-                       "prot_group_type=%d, num_bde=%d\n",
+                       "9023 Cannot setup S/G List for HBA"
+                       "IO segs %d/%d BPL %d SCSI %d: %d %d\n",
+                       lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
+                       phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
                        prot_group_type, num_bde);
+
+       lpfc_cmd->seg_cnt = 0;
+       lpfc_cmd->prot_seg_cnt = 0;
        return 1;
 }
 
+/*
+ * This function calculates the T10 DIF guard tag
+ * on the specified data using the CRC algorithm
+ * implemented by crc_t10dif.
+ */
+uint16_t
+lpfc_bg_crc(uint8_t *data, int count)
+{
+       uint16_t crc = 0;
+       uint16_t x;
+
+       crc = crc_t10dif(data, count);
+       x = cpu_to_be16(crc);
+       return x;
+}
+
+/*
+ * This function calculates the T10 DIF guard tag
+ * on the specified data using the IP checksum algorithm
+ * implemented by ip_compute_csum.
+ */
+uint16_t
+lpfc_bg_csum(uint8_t *data, int count)
+{
+       uint16_t ret;
+
+       ret = ip_compute_csum(data, count);
+       return ret;
+}
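
For illustration only, a self-contained userspace version of the T10-DIF guard CRC (polynomial 0x8BB7, no reflection, zero initial value) that crc_t10dif() computes; lpfc_bg_crc() above additionally converts the result to big-endian so it can be compared directly with the guard tag read from the DIF tuple.

#include <stddef.h>

/* Bitwise CRC-16/T10-DIF sketch; the kernel's crc_t10dif() is table-driven
 * but produces the same value for the same input. */
static unsigned short crc_t10dif_sketch(const unsigned char *data, size_t len)
{
        unsigned short crc = 0;
        size_t i;
        int bit;

        for (i = 0; i < len; i++) {
                crc ^= (unsigned short)data[i] << 8;
                for (bit = 0; bit < 8; bit++)
                        crc = (crc & 0x8000) ?
                                (unsigned short)((crc << 1) ^ 0x8bb7) :
                                (unsigned short)(crc << 1);
        }
        return crc;
}
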
+
+/*
+ * This function examines the protection data to try to determine
+ * what type of T10-DIF error occurred.
+ */
+void
+lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+{
+       struct scatterlist *sgpe; /* s/g prot entry */
+       struct scatterlist *sgde; /* s/g data entry */
+       struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
+       struct scsi_dif_tuple *src = NULL;
+       uint8_t *data_src = NULL;
+       uint16_t guard_tag, guard_type;
+       uint16_t start_app_tag, app_tag;
+       uint32_t start_ref_tag, ref_tag;
+       int prot, protsegcnt;
+       int err_type, len, data_len;
+       int chk_ref, chk_app, chk_guard;
+       uint16_t sum;
+       unsigned blksize;
+
+       err_type = BGS_GUARD_ERR_MASK;
+       sum = 0;
+       guard_tag = 0;
+
+       /* First check to see if there is protection data to examine */
+       prot = scsi_get_prot_op(cmd);
+       if ((prot == SCSI_PROT_READ_STRIP) ||
+           (prot == SCSI_PROT_WRITE_INSERT) ||
+           (prot == SCSI_PROT_NORMAL))
+               goto out;
+
+       /* Currently the driver just supports ref_tag and guard_tag checking */
+       chk_ref = 1;
+       chk_app = 0;
+       chk_guard = 0;
+
+       /* Setup a ptr to the protection data provided by the SCSI host */
+       sgpe = scsi_prot_sglist(cmd);
+       protsegcnt = lpfc_cmd->prot_seg_cnt;
+
+       if (sgpe && protsegcnt) {
+
+               /*
+                * We will only try to verify guard tag if the segment
+                * data length is a multiple of the blksize.
+                */
+               sgde = scsi_sglist(cmd);
+               blksize = lpfc_cmd_blksize(cmd);
+               data_src = (uint8_t *)sg_virt(sgde);
+               data_len = sgde->length;
+               if ((data_len & (blksize - 1)) == 0)
+                       chk_guard = 1;
+               guard_type = scsi_host_get_guard(cmd->device->host);
+
+               start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */
+               src = (struct scsi_dif_tuple *)sg_virt(sgpe);
+               start_app_tag = src->app_tag;
+               len = sgpe->length;
+               while (src && protsegcnt) {
+                       while (len) {
+
+                               /*
+                                * First check to see if a protection data
+                                * check is valid
+                                */
+                               if ((src->ref_tag == 0xffffffff) ||
+                                   (src->app_tag == 0xffff)) {
+                                       start_ref_tag++;
+                                       goto skipit;
+                               }
+
+                               /* App Tag checking */
+                               app_tag = src->app_tag;
+                               if (chk_app && (app_tag != start_app_tag)) {
+                                       err_type = BGS_APPTAG_ERR_MASK;
+                                       goto out;
+                               }
+
+                               /* Reference Tag checking */
+                               ref_tag = be32_to_cpu(src->ref_tag);
+                               if (chk_ref && (ref_tag != start_ref_tag)) {
+                                       err_type = BGS_REFTAG_ERR_MASK;
+                                       goto out;
+                               }
+                               start_ref_tag++;
+
+                               /* Guard Tag checking */
+                               if (chk_guard) {
+                                       guard_tag = src->guard_tag;
+                                       if (guard_type == SHOST_DIX_GUARD_IP)
+                                               sum = lpfc_bg_csum(data_src,
+                                                                  blksize);
+                                       else
+                                               sum = lpfc_bg_crc(data_src,
+                                                                 blksize);
+                                       if ((guard_tag != sum)) {
+                                               err_type = BGS_GUARD_ERR_MASK;
+                                               goto out;
+                                       }
+                               }
+skipit:
+                               len -= sizeof(struct scsi_dif_tuple);
+                               if (len < 0)
+                                       len = 0;
+                               src++;
+
+                               data_src += blksize;
+                               data_len -= blksize;
+
+                               /*
+                                * Are we at the end of the Data segment?
+                                * The data segment is only used for Guard
+                                * tag checking.
+                                */
+                               if (chk_guard && (data_len == 0)) {
+                                       chk_guard = 0;
+                                       sgde = sg_next(sgde);
+                                       if (!sgde)
+                                               goto out;
+
+                                       data_src = (uint8_t *)sg_virt(sgde);
+                                       data_len = sgde->length;
+                                       if ((data_len & (blksize - 1)) == 0)
+                                               chk_guard = 1;
+                               }
+                       }
+
+                       /* Go to the next protection data segment */
+                       sgpe = sg_next(sgpe);
+                       if (sgpe) {
+                               src = (struct scsi_dif_tuple *)sg_virt(sgpe);
+                               len = sgpe->length;
+                       } else {
+                               src = NULL;
+                       }
+                       protsegcnt--;
+               }
+       }
+out:
+       if (err_type == BGS_GUARD_ERR_MASK) {
+               scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+                                       0x10, 0x1);
+               cmd->result = DRIVER_SENSE << 24
+                       | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+               phba->bg_guard_err_cnt++;
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+                               "9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
+                               (unsigned long)scsi_get_lba(cmd),
+                               sum, guard_tag);
+
+       } else if (err_type == BGS_REFTAG_ERR_MASK) {
+               scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+                                       0x10, 0x3);
+               cmd->result = DRIVER_SENSE << 24
+                       | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+
+               phba->bg_reftag_err_cnt++;
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+                               "9066 BLKGRD: LBA %lx ref_tag error %x != %x\n",
+                               (unsigned long)scsi_get_lba(cmd),
+                               ref_tag, start_ref_tag);
+
+       } else if (err_type == BGS_APPTAG_ERR_MASK) {
+               scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
+                                       0x10, 0x2);
+               cmd->result = DRIVER_SENSE << 24
+                       | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
+
+               phba->bg_apptag_err_cnt++;
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+                               "9041 BLKGRD: LBA %lx app_tag error %x != %x\n",
+                               (unsigned long)scsi_get_lba(cmd),
+                               app_tag, start_app_tag);
+       }
+}
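
The walk in lpfc_calc_bg_err() steps through the protection scatterlist one 8-byte DIF tuple per data block; for reference, the assumed on-the-wire layout (all fields big-endian) is roughly the following, mirroring the three fields the driver reads from struct scsi_dif_tuple.

/* Illustrative layout of one T10 DIF protection interval (8 bytes).
 * 0xffff in app_tag or 0xffffffff in ref_tag means "do not check",
 * which is exactly the escape the loop above skips over. */
struct dif_tuple_sketch {
        unsigned short guard_tag;       /* CRC (or IP checksum for DIX) of the block */
        unsigned short app_tag;         /* application tag */
        unsigned int   ref_tag;         /* low 32 bits of the LBA, incremented per block */
};
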
+
+
 /*
  * This function checks for BlockGuard errors detected by
  * the HBA.  In case of errors, the ASC/ASCQ fields in the
@@ -2842,12 +3219,6 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
        uint32_t bgstat = bgf->bgstat;
        uint64_t failing_sector = 0;
 
-       lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9069 BLKGRD: BG ERROR in cmd"
-                       " 0x%x lba 0x%llx blk cnt 0x%x "
-                       "bgstat=0x%x bghm=0x%x\n",
-                       cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
-                       blk_rq_sectors(cmd->request), bgstat, bghm);
-
        spin_lock(&_dump_buf_lock);
        if (!_dump_buf_done) {
                lpfc_printf_log(phba, KERN_ERR, LOG_BG,  "9070 BLKGRD: Saving"
@@ -2870,18 +3241,24 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
 
        if (lpfc_bgs_get_invalid_prof(bgstat)) {
                cmd->result = ScsiResult(DID_ERROR, 0);
-               lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9072 BLKGRD: Invalid"
-                       " BlockGuard profile. bgstat:0x%x\n",
-                       bgstat);
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+                               "9072 BLKGRD: Invalid BG Profile in cmd"
+                               " 0x%x lba 0x%llx blk cnt 0x%x "
+                               "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+                               (unsigned long long)scsi_get_lba(cmd),
+                               blk_rq_sectors(cmd->request), bgstat, bghm);
                ret = (-1);
                goto out;
        }
 
        if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
                cmd->result = ScsiResult(DID_ERROR, 0);
-               lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9073 BLKGRD: "
-                               "Invalid BlockGuard DIF Block. bgstat:0x%x\n",
-                               bgstat);
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+                               "9073 BLKGRD: Invalid BG PDIF Block in cmd"
+                               " 0x%x lba 0x%llx blk cnt 0x%x "
+                               "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+                               (unsigned long long)scsi_get_lba(cmd),
+                               blk_rq_sectors(cmd->request), bgstat, bghm);
                ret = (-1);
                goto out;
        }
@@ -2894,8 +3271,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
                cmd->result = DRIVER_SENSE << 24
                        | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
                phba->bg_guard_err_cnt++;
-               lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                       "9055 BLKGRD: guard_tag error\n");
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+                               "9055 BLKGRD: Guard Tag error in cmd"
+                               " 0x%x lba 0x%llx blk cnt 0x%x "
+                               "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+                               (unsigned long long)scsi_get_lba(cmd),
+                               blk_rq_sectors(cmd->request), bgstat, bghm);
        }
 
        if (lpfc_bgs_get_reftag_err(bgstat)) {
@@ -2907,8 +3288,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
                        | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
 
                phba->bg_reftag_err_cnt++;
-               lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                       "9056 BLKGRD: ref_tag error\n");
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+                               "9056 BLKGRD: Ref Tag error in cmd"
+                               " 0x%x lba 0x%llx blk cnt 0x%x "
+                               "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+                               (unsigned long long)scsi_get_lba(cmd),
+                               blk_rq_sectors(cmd->request), bgstat, bghm);
        }
 
        if (lpfc_bgs_get_apptag_err(bgstat)) {
@@ -2920,8 +3305,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
                        | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
 
                phba->bg_apptag_err_cnt++;
-               lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                       "9061 BLKGRD: app_tag error\n");
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+                               "9061 BLKGRD: App Tag error in cmd"
+                               " 0x%x lba 0x%llx blk cnt 0x%x "
+                               "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+                               (unsigned long long)scsi_get_lba(cmd),
+                               blk_rq_sectors(cmd->request), bgstat, bghm);
        }
 
        if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
@@ -2960,11 +3349,16 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
 
        if (!ret) {
                /* No error was reported - problem in FW? */
-               cmd->result = ScsiResult(DID_ERROR, 0);
-               lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                       "9057 BLKGRD: Unknown error reported!\n");
+               lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
+                               "9057 BLKGRD: Unknown error in cmd"
+                               " 0x%x lba 0x%llx blk cnt 0x%x "
+                               "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
+                               (unsigned long long)scsi_get_lba(cmd),
+                               blk_rq_sectors(cmd->request), bgstat, bghm);
+
+               /* Calculate what type of error it was */
+               lpfc_calc_bg_err(phba, lpfc_cmd);
        }
-
 out:
        return ret;
 }
@@ -3028,6 +3422,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
                                "dma_map_sg.  Config %d, seg_cnt %d\n",
                                __func__, phba->cfg_sg_seg_cnt,
                               lpfc_cmd->seg_cnt);
+                       lpfc_cmd->seg_cnt = 0;
                        scsi_dma_unmap(scsi_cmnd);
                        return 1;
                }
@@ -3093,45 +3488,6 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
        return 0;
 }
 
-/**
- * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
- * @phba: The Hba for which this call is being executed.
- * @lpfc_cmd: The scsi buffer which is going to be adjusted.
- *
- * Adjust the data length to account for how much data
- * is actually on the wire.
- *
- * returns the adjusted data length
- **/
-static int
-lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
-               struct lpfc_scsi_buf *lpfc_cmd)
-{
-       struct scsi_cmnd *sc = lpfc_cmd->pCmd;
-       int diflen, fcpdl;
-       unsigned blksize;
-
-       fcpdl = scsi_bufflen(sc);
-
-       /* Check if there is protection data on the wire */
-       if (sc->sc_data_direction == DMA_FROM_DEVICE) {
-               /* Read */
-               if (scsi_get_prot_op(sc) ==  SCSI_PROT_READ_INSERT)
-                       return fcpdl;
-
-       } else {
-               /* Write */
-               if (scsi_get_prot_op(sc) ==  SCSI_PROT_WRITE_STRIP)
-                       return fcpdl;
-       }
-
-       /* If protection data on the wire, adjust the count accordingly */
-       blksize = lpfc_cmd_blksize(sc);
-       diflen = (fcpdl / blksize) * 8;
-       fcpdl += diflen;
-       return fcpdl;
-}
-
 /**
  * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
  * @phba: The Hba for which this call is being executed.
@@ -3149,14 +3505,14 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
        struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
        struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl);
        IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
-       uint32_t num_bde = 0;
+       uint32_t num_sge = 0;
        int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
        int prot_group_type = 0;
        int fcpdl;
 
        /*
         * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
-        *  fcp_rsp regions to the first data bde entry
+        *  fcp_rsp regions to the first data sge entry
         */
        if (scsi_sg_count(scsi_cmnd)) {
                /*
@@ -3179,28 +3535,28 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
 
                sgl += 1;
                lpfc_cmd->seg_cnt = datasegcnt;
-               if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                                       "9087 BLKGRD: %s: Too many sg segments"
-                                       " from dma_map_sg.  Config %d, seg_cnt"
-                                       " %d\n",
-                                       __func__, phba->cfg_sg_seg_cnt,
-                                       lpfc_cmd->seg_cnt);
-                       scsi_dma_unmap(scsi_cmnd);
-                       return 1;
-               }
+
+               /* First check if data segment count from SCSI Layer is good */
+               if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
+                       goto err;
 
                prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
 
                switch (prot_group_type) {
                case LPFC_PG_TYPE_NO_DIF:
-                       num_bde = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
+                       /* Here we need to add a DISEED to the count */
+                       if ((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt)
+                               goto err;
+
+                       num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
                                        datasegcnt);
+
                        /* we should have 2 or more entries in buffer list */
-                       if (num_bde < 2)
+                       if (num_sge < 2)
                                goto err;
                        break;
-               case LPFC_PG_TYPE_DIF_BUF:{
+
+               case LPFC_PG_TYPE_DIF_BUF:
                        /*
                         * This type indicates that protection buffers are
                         * passed to the driver, so that needs to be prepared
@@ -3215,31 +3571,28 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
                        }
 
                        lpfc_cmd->prot_seg_cnt = protsegcnt;
-                       if (lpfc_cmd->prot_seg_cnt
-                           > phba->cfg_prot_sg_seg_cnt) {
-                               lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-                                       "9088 BLKGRD: %s: Too many prot sg "
-                                       "segments from dma_map_sg.  Config %d,"
-                                               "prot_seg_cnt %d\n", __func__,
-                                               phba->cfg_prot_sg_seg_cnt,
-                                               lpfc_cmd->prot_seg_cnt);
-                               dma_unmap_sg(&phba->pcidev->dev,
-                                            scsi_prot_sglist(scsi_cmnd),
-                                            scsi_prot_sg_count(scsi_cmnd),
-                                            datadir);
-                               scsi_dma_unmap(scsi_cmnd);
-                               return 1;
-                       }
+                       /*
+                        * There is a minimum of 3 SGEs used for every
+                        * protection data segment.
+                        */
+                       if ((lpfc_cmd->prot_seg_cnt * 3) >
+                           (phba->cfg_total_seg_cnt - 2))
+                               goto err;
 
-                       num_bde = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
+                       num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
                                        datasegcnt, protsegcnt);
+
                        /* we should have 3 or more entries in buffer list */
-                       if (num_bde < 3)
+                       if ((num_sge < 3) ||
+                           (num_sge > phba->cfg_total_seg_cnt))
                                goto err;
                        break;
-               }
+
                case LPFC_PG_TYPE_INVALID:
                default:
+                       scsi_dma_unmap(scsi_cmnd);
+                       lpfc_cmd->seg_cnt = 0;
+
                        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
                                        "9083 Unexpected protection group %i\n",
                                        prot_group_type);
@@ -3263,7 +3616,6 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
        }
 
        fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
-
        fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
 
        /*
@@ -3274,10 +3626,22 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
 
        return 0;
 err:
+       if (lpfc_cmd->seg_cnt)
+               scsi_dma_unmap(scsi_cmnd);
+       if (lpfc_cmd->prot_seg_cnt)
+               dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
+                            scsi_prot_sg_count(scsi_cmnd),
+                            scsi_cmnd->sc_data_direction);
+
        lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-                       "9084 Could not setup all needed BDE's"
-                       "prot_group_type=%d, num_bde=%d\n",
-                       prot_group_type, num_bde);
+                       "9084 Cannot setup S/G List for HBA"
+                       "IO segs %d/%d SGL %d SCSI %d: %d %d\n",
+                       lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
+                       phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
+                       prot_group_type, num_sge);
+
+       lpfc_cmd->seg_cnt = 0;
+       lpfc_cmd->prot_seg_cnt = 0;
        return 1;
 }
 
@@ -4357,7 +4721,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
 
        if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
                if (vport->phba->cfg_enable_bg) {
-                       lpfc_printf_vlog(vport, KERN_INFO, LOG_BG,
+                       lpfc_printf_vlog(vport,
+                                        KERN_INFO, LOG_SCSI_CMD,
                                         "9033 BLKGRD: rcvd %s cmd:x%x "
                                         "sector x%llx cnt %u pt %x\n",
                                         dif_op_str[scsi_get_prot_op(cmnd)],
@@ -4369,7 +4734,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
                err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
        } else {
                if (vport->phba->cfg_enable_bg) {
-                       lpfc_printf_vlog(vport, KERN_INFO, LOG_BG,
+                       lpfc_printf_vlog(vport,
+                                        KERN_INFO, LOG_SCSI_CMD,
                                         "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
                                         "x%x sector x%llx cnt %u pt %x\n",
                                         cmnd->cmnd[0],
@@ -4542,7 +4908,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
        /* Wait for abort to complete */
        wait_event_timeout(waitq,
                          (lpfc_cmd->pCmd != cmnd),
-                          (2*vport->cfg_devloss_tmo*HZ));
+                          msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
        lpfc_cmd->waitq = NULL;
 
        if (lpfc_cmd->pCmd == cmnd) {
@@ -5012,16 +5378,24 @@ lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
        struct lpfc_hba *phba = vport->phba;
        int rc, ret = SUCCESS;
 
+       lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+                        "3172 SCSI layer issued Host Reset Data:\n");
+
        lpfc_offline_prep(phba, LPFC_MBX_WAIT);
        lpfc_offline(phba);
        rc = lpfc_sli_brdrestart(phba);
        if (rc)
                ret = FAILED;
-       lpfc_online(phba);
+       rc = lpfc_online(phba);
+       if (rc)
+               ret = FAILED;
        lpfc_unblock_mgmt_io(phba);
 
-       lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-                       "3172 SCSI layer issued Host Reset Data: x%x\n", ret);
+       if (ret == FAILED) {
+               lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+                                "3323 Failed host reset, bring it offline\n");
+               lpfc_sli4_offline_eratt(phba);
+       }
        return ret;
 }
 
@@ -5088,11 +5462,11 @@ lpfc_slave_alloc(struct scsi_device *sdev)
        }
        num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
        if (num_to_alloc != num_allocated) {
-                       lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
-                                "0708 Allocation request of %d "
-                                "command buffers did not succeed.  "
-                                "Allocated %d buffers.\n",
-                                num_to_alloc, num_allocated);
+                       lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+                                        "0708 Allocation request of %d "
+                                        "command buffers did not succeed.  "
+                                        "Allocated %d buffers.\n",
+                                        num_to_alloc, num_allocated);
        }
        if (num_allocated > 0)
                phba->total_scsi_bufs += num_allocated;
index 35dd17eb0f27907f67f4c50968a95452498b93be..572579f87de4fb1c3f74268da15ff8d226384103 100644 (file)
@@ -667,7 +667,7 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba)
 
        spin_lock_irqsave(&phba->hbalock, iflags);
        phba->hba_flag &= ~HBA_RRQ_ACTIVE;
-       next_time = jiffies + HZ * (phba->fc_ratov + 1);
+       next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
        list_for_each_entry_safe(rrq, nextrrq,
                                 &phba->active_rrq_list, list) {
                if (time_after(jiffies, rrq->rrq_stop_time))
@@ -782,7 +782,7 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
                return;
        spin_lock_irqsave(&phba->hbalock, iflags);
        phba->hba_flag &= ~HBA_RRQ_ACTIVE;
-       next_time = jiffies + HZ * (phba->fc_ratov * 2);
+       next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2));
        list_splice_init(&phba->active_rrq_list, &rrq_list);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
 
@@ -878,7 +878,8 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
        else
                rrq->send_rrq = 0;
        rrq->xritag = xritag;
-       rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
+       rrq->rrq_stop_time = jiffies +
+                               msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
        rrq->ndlp = ndlp;
        rrq->nlp_DID = ndlp->nlp_DID;
        rrq->vport = ndlp->vport;
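
The timer conversions in these hunks replace jiffies + HZ * secs with jiffies + msecs_to_jiffies(1000 * secs). For whole-second values the two expressions arm the timer for the same interval; the msecs_to_jiffies() form simply states the timeout in wall-clock milliseconds instead of in multiples of the kernel's HZ tick rate. For example, msecs_to_jiffies(2000) evaluates to two seconds' worth of jiffies whatever CONFIG_HZ is.
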
@@ -926,8 +927,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
        } else  if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
                        !(piocbq->iocb_flag & LPFC_IO_LIBDFC))
                ndlp = piocbq->context_un.ndlp;
-       else  if ((piocbq->iocb.ulpCommand == CMD_ELS_REQUEST64_CR) &&
-                       (piocbq->iocb_flag & LPFC_IO_LIBDFC))
+       else  if (piocbq->iocb_flag & LPFC_IO_LIBDFC)
                ndlp = piocbq->context_un.ndlp;
        else
                ndlp = piocbq->context1;
@@ -1339,7 +1339,8 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                        BUG();
                else
                        mod_timer(&piocb->vport->els_tmofunc,
-                                 jiffies + HZ * (phba->fc_ratov << 1));
+                               jiffies +
+                               msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
        }
 
 
@@ -2340,7 +2341,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
                /* Mailbox cmd <cmd> Cmpl <cmpl> */
                lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
                                "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
-                               "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
+                               "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
+                               "x%x x%x x%x\n",
                                pmb->vport ? pmb->vport->vpi : 0,
                                pmbox->mbxCommand,
                                lpfc_sli_config_mbox_subsys_get(phba, pmb),
@@ -2354,7 +2356,10 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
                                pmbox->un.varWords[4],
                                pmbox->un.varWords[5],
                                pmbox->un.varWords[6],
-                               pmbox->un.varWords[7]);
+                               pmbox->un.varWords[7],
+                               pmbox->un.varWords[8],
+                               pmbox->un.varWords[9],
+                               pmbox->un.varWords[10]);
 
                if (pmb->mbox_cmpl)
                        pmb->mbox_cmpl(phba,pmb);
@@ -2908,8 +2913,9 @@ void lpfc_poll_eratt(unsigned long ptr)
                lpfc_worker_wake_up(phba);
        else
                /* Restart the timer for next eratt poll */
-               mod_timer(&phba->eratt_poll, jiffies +
-                                       HZ * LPFC_ERATT_POLL_INTERVAL);
+               mod_timer(&phba->eratt_poll,
+                         jiffies +
+                         msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
        return;
 }
 
@@ -5511,6 +5517,7 @@ lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
                        list_del_init(&rsrc_blk->list);
                        kfree(rsrc_blk);
                }
+               phba->sli4_hba.max_cfg_param.vpi_used = 0;
                break;
        case LPFC_RSC_TYPE_FCOE_XRI:
                kfree(phba->sli4_hba.xri_bmask);
@@ -5811,6 +5818,7 @@ lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
                lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
        } else {
                kfree(phba->vpi_bmask);
+               phba->sli4_hba.max_cfg_param.vpi_used = 0;
                kfree(phba->vpi_ids);
                bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
                kfree(phba->sli4_hba.xri_bmask);
@@ -5992,7 +6000,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
        struct lpfc_sglq *sglq_entry = NULL;
        struct lpfc_sglq *sglq_entry_next = NULL;
        struct lpfc_sglq *sglq_entry_first = NULL;
-       int status, post_cnt = 0, num_posted = 0, block_cnt = 0;
+       int status, total_cnt, post_cnt = 0, num_posted = 0, block_cnt = 0;
        int last_xritag = NO_XRI;
        LIST_HEAD(prep_sgl_list);
        LIST_HEAD(blck_sgl_list);
@@ -6004,6 +6012,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
        list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
        spin_unlock_irq(&phba->hbalock);
 
+       total_cnt = phba->sli4_hba.els_xri_cnt;
        list_for_each_entry_safe(sglq_entry, sglq_entry_next,
                                 &allc_sgl_list, list) {
                list_del_init(&sglq_entry->list);
@@ -6055,9 +6064,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
                                                sglq_entry->sli4_xritag);
                                        list_add_tail(&sglq_entry->list,
                                                      &free_sgl_list);
-                                       spin_lock_irq(&phba->hbalock);
-                                       phba->sli4_hba.els_xri_cnt--;
-                                       spin_unlock_irq(&phba->hbalock);
+                                       total_cnt--;
                                }
                        }
                }
@@ -6085,9 +6092,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
                                        (sglq_entry_first->sli4_xritag +
                                         post_cnt - 1));
                        list_splice_init(&blck_sgl_list, &free_sgl_list);
-                       spin_lock_irq(&phba->hbalock);
-                       phba->sli4_hba.els_xri_cnt -= post_cnt;
-                       spin_unlock_irq(&phba->hbalock);
+                       total_cnt -= post_cnt;
                }
 
                /* don't reset xirtag due to hole in xri block */
@@ -6097,6 +6102,8 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
                /* reset els sgl post count for next round of posting */
                post_cnt = 0;
        }
+       /* update the number of XRIs posted for ELS */
+       phba->sli4_hba.els_xri_cnt = total_cnt;
 
        /* free the els sgls failed to post */
        lpfc_free_sgl_list(phba, &free_sgl_list);
@@ -6446,16 +6453,17 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 
        /* Start the ELS watchdog timer */
        mod_timer(&vport->els_tmofunc,
-                 jiffies + HZ * (phba->fc_ratov * 2));
+                 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
 
        /* Start heart beat timer */
        mod_timer(&phba->hb_tmofunc,
-                 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+                 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
        phba->hb_outstanding = 0;
        phba->last_completion_time = jiffies;
 
        /* Start error attention (ERATT) polling timer */
-       mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
+       mod_timer(&phba->eratt_poll,
+                 jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
 
        /* Enable PCIe device Advanced Error Reporting (AER) if configured */
        if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
@@ -6822,8 +6830,9 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
                        goto out_not_finished;
                }
                /* timeout active mbox command */
-               mod_timer(&psli->mbox_tmo, (jiffies +
-                              (HZ * lpfc_mbox_tmo_val(phba, pmbox))));
+               timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
+                                          1000);
+               mod_timer(&psli->mbox_tmo, jiffies + timeout);
        }
 
        /* Mailbox cmd <cmd> issue */
@@ -7496,7 +7505,7 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
 
        /* Start timer for the mbox_tmo and log some mailbox post messages */
        mod_timer(&psli->mbox_tmo, (jiffies +
-                 (HZ * lpfc_mbox_tmo_val(phba, mboxq))));
+                 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
 
        lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
                        "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
@@ -7914,15 +7923,21 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
 static inline uint32_t
 lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
 {
-       int i;
-
-       if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU)
-               i = smp_processor_id();
-       else
-               i = atomic_add_return(1, &phba->fcp_qidx);
+       struct lpfc_vector_map_info *cpup;
+       int chann, cpu;
 
-       i = (i % phba->cfg_fcp_io_channel);
-       return i;
+       if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU) {
+               cpu = smp_processor_id();
+               if (cpu < phba->sli4_hba.num_present_cpu) {
+                       cpup = phba->sli4_hba.cpu_map;
+                       cpup += cpu;
+                       return cpup->channel_id;
+               }
+               chann = cpu;
+       }
+       chann = atomic_add_return(1, &phba->fcp_qidx);
+       chann = (chann % phba->cfg_fcp_io_channel);
+       return chann;
 }
 
 /**
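
The rewritten lpfc_sli4_scmd_to_wqidx_distr() prefers the channel_id recorded for the issuing CPU in cpu_map when scheduling by CPU, and only falls back to the atomic round-robin counter otherwise. A user-space sketch of that selection; the structure and names below are illustrative, not the driver's:

    #include <stdio.h>

    struct vector_map_info { unsigned short channel_id; };

    static struct vector_map_info cpu_map[4] = {
            { .channel_id = 0 }, { .channel_id = 1 },
            { .channel_id = 2 }, { .channel_id = 3 },
    };
    static unsigned int fcp_qidx;                /* atomic_t in the driver */

    static unsigned int pick_channel(unsigned int cpu, unsigned int num_present_cpu,
                                     unsigned int io_channels, int sched_by_cpu)
    {
            if (sched_by_cpu && cpu < num_present_cpu)
                    return cpu_map[cpu].channel_id;      /* mapped CPU: use its channel */
            return ++fcp_qidx % io_channels;             /* otherwise round-robin */
    }

    int main(void)
    {
            for (unsigned int cpu = 0; cpu < 6; cpu++)
                    printf("cpu %u -> channel %u\n", cpu,
                           pick_channel(cpu, 4, 4, 1));
            return 0;
    }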
@@ -8444,10 +8459,14 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
 
        if ((piocb->iocb_flag & LPFC_IO_FCP) ||
                (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
+               if (unlikely(!phba->sli4_hba.fcp_wq))
+                       return IOCB_ERROR;
                if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
                                     &wqe))
                        return IOCB_ERROR;
        } else {
+               if (unlikely(!phba->sli4_hba.els_wq))
+                       return IOCB_ERROR;
                if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
                        return IOCB_ERROR;
        }
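
The added checks refuse to post a work-queue entry when fcp_wq or els_wq has not been allocated, returning IOCB_ERROR instead of dereferencing a NULL pointer. A minimal stand-alone sketch of the same guard-before-use pattern, with placeholder types standing in for the driver's queue objects:

    #include <stdio.h>
    #include <stddef.h>

    struct wq { int id; };

    static int wq_put(struct wq *q, int wqe)
    {
            printf("posted wqe %d to wq %d\n", wqe, q->id);
            return 0;
    }

    static int issue_iocb(struct wq **fcp_wq, int idx, int wqe)
    {
            if (!fcp_wq)                 /* queues torn down or never created */
                    return -1;           /* plays the role of IOCB_ERROR */
            return wq_put(fcp_wq[idx], wqe);
    }

    int main(void)
    {
            struct wq q = { .id = 3 };
            struct wq *queues[] = { &q };

            issue_iocb(queues, 0, 42);           /* normal path */
            if (issue_iocb(NULL, 0, 43))         /* guarded path */
                    printf("post rejected: no work queues\n");
            return 0;
    }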
@@ -10003,7 +10022,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
        retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
                                     SLI_IOCB_RET_IOCB);
        if (retval == IOCB_SUCCESS) {
-               timeout_req = timeout * HZ;
+               timeout_req = msecs_to_jiffies(timeout * 1000);
                timeleft = wait_event_timeout(done_q,
                                lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
                                timeout_req);
@@ -10108,7 +10127,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
        if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
                wait_event_interruptible_timeout(done_q,
                                pmboxq->mbox_flag & LPFC_MBX_WAKE,
-                               timeout * HZ);
+                               msecs_to_jiffies(timeout * 1000));
 
                spin_lock_irqsave(&phba->hbalock, flag);
                pmboxq->context1 = NULL;
@@ -12899,8 +12918,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
                }
                wq->db_regaddr = bar_memmap_p + db_offset;
                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                               "3264 WQ[%d]: barset:x%x, offset:x%x\n",
-                               wq->queue_id, pci_barset, db_offset);
+                               "3264 WQ[%d]: barset:x%x, offset:x%x, "
+                               "format:x%x\n", wq->queue_id, pci_barset,
+                               db_offset, wq->db_format);
        } else {
                wq->db_format = LPFC_DB_LIST_FORMAT;
                wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
@@ -13120,8 +13140,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
                }
                hrq->db_regaddr = bar_memmap_p + db_offset;
                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                               "3266 RQ[qid:%d]: barset:x%x, offset:x%x\n",
-                               hrq->queue_id, pci_barset, db_offset);
+                               "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
+                               "format:x%x\n", hrq->queue_id, pci_barset,
+                               db_offset, hrq->db_format);
        } else {
                hrq->db_format = LPFC_DB_RING_FORMAT;
                hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
@@ -13971,13 +13992,14 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
        }
 
        lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-                       "2538 Received frame rctl:%s type:%s "
-                       "Frame Data:%08x %08x %08x %08x %08x %08x\n",
-                       rctl_names[fc_hdr->fh_r_ctl],
-                       type_names[fc_hdr->fh_type],
+                       "2538 Received frame rctl:%s (x%x), type:%s (x%x), "
+                       "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
+                       rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl,
+                       type_names[fc_hdr->fh_type], fc_hdr->fh_type,
                        be32_to_cpu(header[0]), be32_to_cpu(header[1]),
                        be32_to_cpu(header[2]), be32_to_cpu(header[3]),
-                       be32_to_cpu(header[4]), be32_to_cpu(header[5]));
+                       be32_to_cpu(header[4]), be32_to_cpu(header[5]),
+                       be32_to_cpu(header[6]));
        return 0;
 drop:
        lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
index be02b59ea2797a53972a1795dcd4b0b1c156a846..67af460184badc9a2104cd477e099aeb8112a573 100644 (file)
@@ -346,11 +346,6 @@ struct lpfc_bmbx {
 #define SLI4_CT_VFI 2
 #define SLI4_CT_FCFI 3
 
-#define LPFC_SLI4_FL1_MAX_SEGMENT_SIZE 0x10000
-#define LPFC_SLI4_FL1_MAX_BUF_SIZE     0X2000
-#define LPFC_SLI4_MIN_BUF_SIZE         0x400
-#define LPFC_SLI4_MAX_BUF_SIZE         0x20000
-
 /*
  * SLI4 specific data structures
  */
@@ -440,6 +435,17 @@ struct lpfc_sli4_lnk_info {
 
 #define LPFC_SLI4_HANDLER_NAME_SZ      16
 
+/* Used for IRQ vector to CPU mapping */
+struct lpfc_vector_map_info {
+       uint16_t        phys_id;
+       uint16_t        core_id;
+       uint16_t        irq;
+       uint16_t        channel_id;
+       struct cpumask  maskbits;
+};
+#define LPFC_VECTOR_MAP_EMPTY  0xffff
+#define LPFC_MAX_CPU           256
+
 /* SLI4 HBA data structure entries */
 struct lpfc_sli4_hba {
        void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
@@ -573,6 +579,11 @@ struct lpfc_sli4_hba {
        struct lpfc_iov iov;
        spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
        spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */
+
+       /* CPU to vector mapping information */
+       struct lpfc_vector_map_info *cpu_map;
+       uint16_t num_online_cpu;
+       uint16_t num_present_cpu;
 };
 
 enum lpfc_sge_type {
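
The new lpfc_vector_map_info array gives each CPU a preassigned channel_id, with LPFC_VECTOR_MAP_EMPTY marking unused slots. As one hypothetical illustration of how such a map could be filled, the sketch below stripes example IO channels across present CPUs; it is not the driver's setup code:

    #include <stdio.h>

    #define MAX_CPU          8
    #define VECTOR_MAP_EMPTY 0xffff

    struct vector_map_info {
            unsigned short phys_id;
            unsigned short core_id;
            unsigned short irq;
            unsigned short channel_id;
    };

    int main(void)
    {
            struct vector_map_info cpu_map[MAX_CPU];
            unsigned int num_present_cpu = 6, io_channels = 4;

            for (unsigned int cpu = 0; cpu < MAX_CPU; cpu++) {
                    cpu_map[cpu].phys_id = 0;
                    cpu_map[cpu].core_id = (unsigned short)cpu;
                    cpu_map[cpu].irq = VECTOR_MAP_EMPTY;     /* no vector assigned yet */
                    cpu_map[cpu].channel_id = (cpu < num_present_cpu)
                            ? (unsigned short)(cpu % io_channels)
                            : VECTOR_MAP_EMPTY;
            }

            for (unsigned int cpu = 0; cpu < MAX_CPU; cpu++)
                    printf("cpu %u -> channel 0x%x\n", cpu, cpu_map[cpu].channel_id);
            return 0;
    }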
index 664cd04f7cd8091dcaf0396f889b0d0852a4e6f8..a38dc3b169697ee2915cee01158334a240eb51b1 100644 (file)
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.38"
+#define LPFC_DRIVER_VERSION "8.3.39"
 #define LPFC_DRIVER_NAME               "lpfc"
 
 /* Used for SLI 2/3 */
index 0fe188e66000da063c01feec76b557a075545258..e28e431564b08f5b90693c3dc49c5d218e67a262 100644 (file)
@@ -80,7 +80,7 @@ inline void lpfc_vport_set_state(struct lpfc_vport *vport,
        }
 }
 
-static int
+int
 lpfc_alloc_vpi(struct lpfc_hba *phba)
 {
        unsigned long vpi;
@@ -568,6 +568,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
        struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
        struct lpfc_hba   *phba = vport->phba;
        long timeout;
+       bool ns_ndlp_referenced = false;
 
        if (vport->port_type == LPFC_PHYSICAL_PORT) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
@@ -628,6 +629,18 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
 
        lpfc_debugfs_terminate(vport);
 
+       /*
+        * The call to fc_remove_host might release the NameServer ndlp. Since
+        * we might need to use the ndlp to send the DA_ID CT command,
+        * increment the reference for the NameServer ndlp to prevent it from
+        * being released.
+        */
+       ndlp = lpfc_findnode_did(vport, NameServer_DID);
+       if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+               lpfc_nlp_get(ndlp);
+               ns_ndlp_referenced = true;
+       }
+
        /* Remove FC host and then SCSI host with the vport */
        fc_remove_host(lpfc_shost_from_vport(vport));
        scsi_remove_host(lpfc_shost_from_vport(vport));
@@ -734,6 +747,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
                lpfc_discovery_wait(vport);
 
 skip_logo:
+
+       /*
+        * If the NameServer ndlp has been incremented to allow the DA_ID CT
+        * command to be sent, decrement the ndlp now.
+        */
+       if (ns_ndlp_referenced) {
+               ndlp = lpfc_findnode_did(vport, NameServer_DID);
+               lpfc_nlp_put(ndlp);
+       }
+
        lpfc_cleanup(vport);
        lpfc_sli_host_down(vport);
 
index 90828340aceadbd275c7184b8ad465582e894e2a..6b2c94eb8134300e72b4d06571994c0c2fe5d32b 100644 (file)
@@ -90,6 +90,7 @@ int lpfc_vport_getinfo(struct Scsi_Host *, struct vport_info *);
 int lpfc_vport_tgt_remove(struct Scsi_Host *, uint, uint);
 struct lpfc_vport **lpfc_create_vport_work_array(struct lpfc_hba *);
 void lpfc_destroy_vport_work_array(struct lpfc_hba *, struct lpfc_vport **);
+int lpfc_alloc_vpi(struct lpfc_hba *phba);
 
 /*
  *  queuecommand  VPORT-specific return codes. Specified in  the host byte code.
index 7c90d57b867e2c332e1a953a3c01892f971bd779..3a9ddae86f1f8e2d5e33474a7f0f3510ec413c65 100644 (file)
@@ -4931,11 +4931,12 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
                printk(KERN_ERR "megaraid_sas: timed out while"
                        "waiting for HBA to recover\n");
                error = -ENODEV;
-               goto out_kfree_ioc;
+               goto out_up;
        }
        spin_unlock_irqrestore(&instance->hba_lock, flags);
 
        error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
+      out_up:
        up(&instance->ioctl_sem);
 
       out_kfree_ioc:
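
The megaraid_sas change reroutes the recovery-timeout error path through a new out_up label, so the ioctl semaphore taken earlier is released before the buffer is freed; jumping straight to out_kfree_ioc left the semaphore held. A small stand-alone sketch of that unwind ordering, with printf() standing in for down()/up() and negative constants modelling -ENOMEM/-ENODEV:

    #include <stdio.h>
    #include <stdlib.h>

    static int do_ioctl(int force_error)
    {
            int error = 0;
            char *ioc = malloc(64);      /* models the copied-in ioctl packet */

            if (!ioc)
                    return -12;          /* models -ENOMEM */

            printf("down(ioctl_sem)\n"); /* semaphore acquired */

            if (force_error) {
                    error = -19;         /* models -ENODEV: HBA did not recover */
                    goto out_up;         /* must still release the semaphore */
            }

            printf("firmware ioctl runs\n");

    out_up:
            printf("up(ioctl_sem)\n");   /* released on both paths */
            free(ioc);                   /* out_kfree_ioc in the driver */
            return error;
    }

    int main(void)
    {
            do_ioctl(0);
            do_ioctl(1);
            return 0;
    }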
index 74550922ad55e418eb7e37753e0983023728943b..7b7381d7671fdc9cfeecdf3c256984b0261d3116 100644 (file)
@@ -254,7 +254,7 @@ static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
        }
        for (i = 0; i < MVS_MAX_DEVICES; i++) {
                mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED;
-               mvi->devices[i].dev_type = NO_DEVICE;
+               mvi->devices[i].dev_type = SAS_PHY_UNUSED;
                mvi->devices[i].device_id = i;
                mvi->devices[i].dev_status = MVS_DEV_NORMAL;
                init_timer(&mvi->devices[i].timer);
index 532110f4562af58b7517b7b72bfaf0d0f5e25653..c9e244984e30ec1dfed39c7b574b71caffeb0794 100644 (file)
@@ -706,7 +706,7 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
        return 0;
 }
 
-#define        DEV_IS_GONE(mvi_dev)    ((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE)))
+#define        DEV_IS_GONE(mvi_dev)    ((!mvi_dev || (mvi_dev->dev_type == SAS_PHY_UNUSED)))
 static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf,
                                struct mvs_tmf_task *tmf, int *pass)
 {
@@ -726,7 +726,7 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf
                 * libsas will use dev->port, should
                 * not call task_done for sata
                 */
-               if (dev->dev_type != SATA_DEV)
+               if (dev->dev_type != SAS_SATA_DEV)
                        task->task_done(task);
                return rc;
        }
@@ -1159,10 +1159,10 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
                        phy->identify.device_type =
                                phy->att_dev_info & PORT_DEV_TYPE_MASK;
 
-                       if (phy->identify.device_type == SAS_END_DEV)
+                       if (phy->identify.device_type == SAS_END_DEVICE)
                                phy->identify.target_port_protocols =
                                                        SAS_PROTOCOL_SSP;
-                       else if (phy->identify.device_type != NO_DEVICE)
+                       else if (phy->identify.device_type != SAS_PHY_UNUSED)
                                phy->identify.target_port_protocols =
                                                        SAS_PROTOCOL_SMP;
                        if (oob_done)
@@ -1260,7 +1260,7 @@ struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
 {
        u32 dev;
        for (dev = 0; dev < MVS_MAX_DEVICES; dev++) {
-               if (mvi->devices[dev].dev_type == NO_DEVICE) {
+               if (mvi->devices[dev].dev_type == SAS_PHY_UNUSED) {
                        mvi->devices[dev].device_id = dev;
                        return &mvi->devices[dev];
                }
@@ -1278,7 +1278,7 @@ void mvs_free_dev(struct mvs_device *mvi_dev)
        u32 id = mvi_dev->device_id;
        memset(mvi_dev, 0, sizeof(*mvi_dev));
        mvi_dev->device_id = id;
-       mvi_dev->dev_type = NO_DEVICE;
+       mvi_dev->dev_type = SAS_PHY_UNUSED;
        mvi_dev->dev_status = MVS_DEV_NORMAL;
        mvi_dev->taskfileset = MVS_ID_NOT_MAPPED;
 }
@@ -1480,7 +1480,7 @@ static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
 {
        int rc;
        struct sas_phy *phy = sas_get_local_phy(dev);
-       int reset_type = (dev->dev_type == SATA_DEV ||
+       int reset_type = (dev->dev_type == SAS_SATA_DEV ||
                        (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
        rc = sas_phy_reset(phy, reset_type);
        sas_put_local_phy(phy);
@@ -1629,7 +1629,7 @@ int mvs_abort_task(struct sas_task *task)
 
        } else if (task->task_proto & SAS_PROTOCOL_SATA ||
                task->task_proto & SAS_PROTOCOL_STP) {
-               if (SATA_DEV == dev->dev_type) {
+               if (SAS_SATA_DEV == dev->dev_type) {
                        struct mvs_slot_info *slot = task->lldd_task;
                        u32 slot_idx = (u32)(slot - mvi->slot_info);
                        mv_dprintk("mvs_abort_task() mvi=%p task=%p "
index 9f3cc13a5ce7ef70ca9c59c29530a8481fd679fc..60e2fb7f2dca7e37512b128bea9b59fd1c9cd579 100644 (file)
@@ -67,7 +67,7 @@ extern const struct mvs_dispatch mvs_94xx_dispatch;
 extern struct kmem_cache *mvs_task_list_cache;
 
 #define DEV_IS_EXPANDER(type)  \
-       ((type == EDGE_DEV) || (type == FANOUT_DEV))
+       ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE))
 
 #define bit(n) ((u64)1 << n)
 
@@ -241,7 +241,7 @@ struct mvs_phy {
 
 struct mvs_device {
        struct list_head                dev_entry;
-       enum sas_dev_type dev_type;
+       enum sas_device_type dev_type;
        struct mvs_info *mvi_info;
        struct domain_device *sas_device;
        struct timer_list timer;
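
These mvsas hunks only rename device-type symbols to the libsas spellings (enum sas_device_type, SAS_PHY_UNUSED, SAS_SATA_DEV, SAS_EDGE_EXPANDER_DEVICE and so on); the logic is unchanged. The trimmed stand-in below exercises the renamed values with DEV_IS_GONE/DEV_IS_EXPANDER style macros in user space; the numeric values are illustrative only:

    #include <stdio.h>

    enum sas_device_type {
            SAS_PHY_UNUSED = 0,
            SAS_END_DEVICE,
            SAS_EDGE_EXPANDER_DEVICE,
            SAS_FANOUT_EXPANDER_DEVICE,
            SAS_SATA_DEV,
    };

    struct mvs_device_model { enum sas_device_type dev_type; };

    #define DEV_IS_GONE(d)      (!(d) || (d)->dev_type == SAS_PHY_UNUSED)
    #define DEV_IS_EXPANDER(t)  ((t) == SAS_EDGE_EXPANDER_DEVICE || \
                                 (t) == SAS_FANOUT_EXPANDER_DEVICE)

    int main(void)
    {
            struct mvs_device_model dev = { .dev_type = SAS_SATA_DEV };

            printf("gone: %d, expander: %d\n",
                   DEV_IS_GONE(&dev), DEV_IS_EXPANDER(dev.dev_type));

            dev.dev_type = SAS_PHY_UNUSED;       /* what mvs_free_dev() resets to */
            printf("gone after free: %d\n", DEV_IS_GONE(&dev));
            return 0;
    }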
index 52f04296171c3e1c3dc0963f6e5e8a6b3037f92f..ce4cd87c7c662a6de92f71331e1e49052f4c8e4f 100644 (file)
@@ -4,9 +4,10 @@
 # Copyright (C) 2008-2009  USI Co., Ltd.
 
 
-obj-$(CONFIG_SCSI_PM8001) += pm8001.o
-pm8001-y += pm8001_init.o \
+obj-$(CONFIG_SCSI_PM8001) += pm80xx.o
+pm80xx-y += pm8001_init.o \
                pm8001_sas.o  \
                pm8001_ctl.o  \
-               pm8001_hwi.o
+               pm8001_hwi.o  \
+               pm80xx_hwi.o
 
index 45bc197bc22f2a45c46f5176cabefddc69e077f2..d99f41c2ca13b927c66d34036937d3443b29f862 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
+ * PMC-Sierra 8001/8081/8088/8089 SAS/SATA based host adapters driver
  *
  * Copyright (c) 2008-2009 USI Co., Ltd.
  * All rights reserved.
@@ -58,8 +58,13 @@ static ssize_t pm8001_ctl_mpi_interface_rev_show(struct device *cdev,
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
        struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n",
-               pm8001_ha->main_cfg_tbl.interface_rev);
+       if (pm8001_ha->chip_id == chip_8001) {
+               return snprintf(buf, PAGE_SIZE, "%d\n",
+                       pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev);
+       } else {
+               return snprintf(buf, PAGE_SIZE, "%d\n",
+                       pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev);
+       }
 }
 static
 DEVICE_ATTR(interface_rev, S_IRUGO, pm8001_ctl_mpi_interface_rev_show, NULL);
@@ -78,11 +83,19 @@ static ssize_t pm8001_ctl_fw_version_show(struct device *cdev,
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
        struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
 
-       return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
-                      (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 24),
-                      (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 16),
-                      (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 8),
-                      (u8)(pm8001_ha->main_cfg_tbl.firmware_rev));
+       if (pm8001_ha->chip_id == chip_8001) {
+               return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
+               (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 24),
+               (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 16),
+               (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 8),
+               (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev));
+       } else {
+               return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
+               (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 24),
+               (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 16),
+               (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 8),
+               (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev));
+       }
 }
 static DEVICE_ATTR(fw_version, S_IRUGO, pm8001_ctl_fw_version_show, NULL);
 /**
@@ -99,8 +112,13 @@ static ssize_t pm8001_ctl_max_out_io_show(struct device *cdev,
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
        struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n",
-                       pm8001_ha->main_cfg_tbl.max_out_io);
+       if (pm8001_ha->chip_id == chip_8001) {
+               return snprintf(buf, PAGE_SIZE, "%d\n",
+                       pm8001_ha->main_cfg_tbl.pm8001_tbl.max_out_io);
+       } else {
+               return snprintf(buf, PAGE_SIZE, "%d\n",
+                       pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io);
+       }
 }
 static DEVICE_ATTR(max_out_io, S_IRUGO, pm8001_ctl_max_out_io_show, NULL);
 /**
@@ -117,8 +135,15 @@ static ssize_t pm8001_ctl_max_devices_show(struct device *cdev,
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
        struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
 
-       return snprintf(buf, PAGE_SIZE, "%04d\n",
-                       (u16)(pm8001_ha->main_cfg_tbl.max_sgl >> 16));
+       if (pm8001_ha->chip_id == chip_8001) {
+               return snprintf(buf, PAGE_SIZE, "%04d\n",
+                       (u16)(pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl >> 16)
+                       );
+       } else {
+               return snprintf(buf, PAGE_SIZE, "%04d\n",
+                       (u16)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl >> 16)
+                       );
+       }
 }
 static DEVICE_ATTR(max_devices, S_IRUGO, pm8001_ctl_max_devices_show, NULL);
 /**
@@ -136,8 +161,15 @@ static ssize_t pm8001_ctl_max_sg_list_show(struct device *cdev,
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
        struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
 
-       return snprintf(buf, PAGE_SIZE, "%04d\n",
-                       pm8001_ha->main_cfg_tbl.max_sgl & 0x0000FFFF);
+       if (pm8001_ha->chip_id == chip_8001) {
+               return snprintf(buf, PAGE_SIZE, "%04d\n",
+                       pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl & 0x0000FFFF
+                       );
+       } else {
+               return snprintf(buf, PAGE_SIZE, "%04d\n",
+                       pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl & 0x0000FFFF
+                       );
+       }
 }
 static DEVICE_ATTR(max_sg_list, S_IRUGO, pm8001_ctl_max_sg_list_show, NULL);
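
Each of these sysfs handlers now selects between the pm8001_tbl and pm80xx_tbl copies of the main configuration table based on chip_id. A minimal sketch of that dispatch, assuming a union holding both generations of the table; the field layout below is illustrative, not the driver's:

    #include <stdio.h>

    enum chip_flavor_model { CHIP_8001, CHIP_80XX };

    struct cfg_8001 { unsigned int interface_rev; };
    struct cfg_80xx { unsigned int interface_rev; };

    union main_cfg_tbl_model {
            struct cfg_8001 pm8001_tbl;
            struct cfg_80xx pm80xx_tbl;
    };

    static unsigned int interface_rev(enum chip_flavor_model chip,
                                      const union main_cfg_tbl_model *tbl)
    {
            if (chip == CHIP_8001)
                    return tbl->pm8001_tbl.interface_rev;   /* SPC-generation layout */
            return tbl->pm80xx_tbl.interface_rev;           /* 80xx-generation layout */
    }

    int main(void)
    {
            union main_cfg_tbl_model tbl = { .pm8001_tbl = { .interface_rev = 2 } };

            printf("interface_rev = %u\n", interface_rev(CHIP_8001, &tbl));
            return 0;
    }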
 
@@ -173,7 +205,14 @@ static ssize_t pm8001_ctl_sas_spec_support_show(struct device *cdev,
        struct Scsi_Host *shost = class_to_shost(cdev);
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
        struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
-       mode = (pm8001_ha->main_cfg_tbl.ctrl_cap_flag & 0xfe000000)>>25;
+       /* fe000000 means supports SAS2.1 */
+       if (pm8001_ha->chip_id == chip_8001)
+               mode = (pm8001_ha->main_cfg_tbl.pm8001_tbl.ctrl_cap_flag &
+                                                       0xfe000000)>>25;
+       else
+               /* fe000000 means supports SAS2.1 */
+               mode = (pm8001_ha->main_cfg_tbl.pm80xx_tbl.ctrl_cap_flag &
+                                                       0xfe000000)>>25;
        return show_sas_spec_support_status(mode, buf);
 }
 static DEVICE_ATTR(sas_spec_support, S_IRUGO,
@@ -361,10 +400,11 @@ static int pm8001_set_nvmd(struct pm8001_hba_info *pm8001_ha)
                goto out;
        }
        payload = (struct pm8001_ioctl_payload *)ioctlbuffer;
-       memcpy((u8 *)payload->func_specific, (u8 *)pm8001_ha->fw_image->data,
+       memcpy((u8 *)&payload->func_specific, (u8 *)pm8001_ha->fw_image->data,
                                pm8001_ha->fw_image->size);
        payload->length = pm8001_ha->fw_image->size;
        payload->id = 0;
+       payload->minor_function = 0x1;
        pm8001_ha->nvmd_completion = &completion;
        ret = PM8001_CHIP_DISP->set_nvmd_req(pm8001_ha, payload);
        wait_for_completion(&completion);
@@ -411,7 +451,7 @@ static int pm8001_update_flash(struct pm8001_hba_info *pm8001_ha)
                        payload->length = 1024*16;
                        payload->id = 0;
                        fwControl =
-                             (struct fw_control_info *)payload->func_specific;
+                             (struct fw_control_info *)&payload->func_specific;
                        fwControl->len = IOCTL_BUF_SIZE;   /* IN */
                        fwControl->size = partitionSize + HEADER_LEN;/* IN */
                        fwControl->retcode = 0;/* OUT */
index c3d20c8d4abe3724308182697a558dac7440a07a..479c5a7a863a8aa8fded8e4164b8377d3658089d 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
+ * PMC-Sierra 8001/8081/8088/8089 SAS/SATA based host adapters driver
  *
  * Copyright (c) 2008-2009 USI Co., Ltd.
  * All rights reserved.
 
 enum chip_flavors {
        chip_8001,
+       chip_8008,
+       chip_8009,
+       chip_8018,
+       chip_8019
 };
-#define USI_MAX_MEMCNT                 9
-#define PM8001_MAX_DMA_SG              SG_ALL
+
 enum phy_speed {
        PHY_SPEED_15 = 0x01,
        PHY_SPEED_30 = 0x02,
@@ -69,23 +72,34 @@ enum port_type {
 #define PM8001_MPI_QUEUE         1024   /* maximum mpi queue entries */
 #define        PM8001_MAX_INB_NUM       1
 #define        PM8001_MAX_OUTB_NUM      1
+#define        PM8001_MAX_SPCV_INB_NUM         1
+#define        PM8001_MAX_SPCV_OUTB_NUM        4
 #define        PM8001_CAN_QUEUE         508    /* SCSI Queue depth */
 
+/* Inbound/Outbound queue size */
+#define IOMB_SIZE_SPC          64
+#define IOMB_SIZE_SPCV         128
+
 /* unchangeable hardware details */
-#define        PM8001_MAX_PHYS          8      /* max. possible phys */
-#define        PM8001_MAX_PORTS         8      /* max. possible ports */
-#define        PM8001_MAX_DEVICES       1024   /* max supported device */
+#define        PM8001_MAX_PHYS          16     /* max. possible phys */
+#define        PM8001_MAX_PORTS         16     /* max. possible ports */
+#define        PM8001_MAX_DEVICES       2048   /* max supported device */
+#define        PM8001_MAX_MSIX_VEC      64     /* max msi-x int for spcv/ve */
 
+#define USI_MAX_MEMCNT_BASE    5
+#define IB                     (USI_MAX_MEMCNT_BASE + 1)
+#define CI                     (IB + PM8001_MAX_SPCV_INB_NUM)
+#define OB                     (CI + PM8001_MAX_SPCV_INB_NUM)
+#define PI                     (OB + PM8001_MAX_SPCV_OUTB_NUM)
+#define USI_MAX_MEMCNT         (PI + PM8001_MAX_SPCV_OUTB_NUM)
+#define PM8001_MAX_DMA_SG      SG_ALL
 enum memory_region_num {
        AAP1 = 0x0, /* application acceleration processor */
        IOP,        /* IO processor */
-       CI,         /* consumer index */
-       PI,         /* producer index */
-       IB,         /* inbound queue */
-       OB,         /* outbound queue */
        NVMD,       /* NVM device */
        DEV_MEM,    /* memory for devices */
        CCB_MEM,    /* memory for command control block */
+       FW_FLASH    /* memory for fw flash update */
 };
 #define        PM8001_EVENT_LOG_SIZE    (128 * 1024)
 
index b8dd05074abb4f61faebbe0af12bd3074fd8b930..69dd49c05f1e1069b2aea0544734f52ea5126abb 100644 (file)
 static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
 {
        void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
-       pm8001_ha->main_cfg_tbl.signature       = pm8001_mr32(address, 0x00);
-       pm8001_ha->main_cfg_tbl.interface_rev   = pm8001_mr32(address, 0x04);
-       pm8001_ha->main_cfg_tbl.firmware_rev    = pm8001_mr32(address, 0x08);
-       pm8001_ha->main_cfg_tbl.max_out_io      = pm8001_mr32(address, 0x0C);
-       pm8001_ha->main_cfg_tbl.max_sgl         = pm8001_mr32(address, 0x10);
-       pm8001_ha->main_cfg_tbl.ctrl_cap_flag   = pm8001_mr32(address, 0x14);
-       pm8001_ha->main_cfg_tbl.gst_offset      = pm8001_mr32(address, 0x18);
-       pm8001_ha->main_cfg_tbl.inbound_queue_offset =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.signature    =
+                               pm8001_mr32(address, 0x00);
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev =
+                               pm8001_mr32(address, 0x04);
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev =
+                               pm8001_mr32(address, 0x08);
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.max_out_io   =
+                               pm8001_mr32(address, 0x0C);
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl      =
+                               pm8001_mr32(address, 0x10);
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.ctrl_cap_flag =
+                               pm8001_mr32(address, 0x14);
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.gst_offset   =
+                               pm8001_mr32(address, 0x18);
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_queue_offset =
                pm8001_mr32(address, MAIN_IBQ_OFFSET);
-       pm8001_ha->main_cfg_tbl.outbound_queue_offset =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_queue_offset =
                pm8001_mr32(address, MAIN_OBQ_OFFSET);
-       pm8001_ha->main_cfg_tbl.hda_mode_flag   =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.hda_mode_flag        =
                pm8001_mr32(address, MAIN_HDA_FLAGS_OFFSET);
 
        /* read analog Setting offset from the configuration table */
-       pm8001_ha->main_cfg_tbl.anolog_setup_table_offset =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.anolog_setup_table_offset =
                pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET);
 
        /* read Error Dump Offset and Length */
-       pm8001_ha->main_cfg_tbl.fatal_err_dump_offset0 =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_offset0 =
                pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET);
-       pm8001_ha->main_cfg_tbl.fatal_err_dump_length0 =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_length0 =
                pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH);
-       pm8001_ha->main_cfg_tbl.fatal_err_dump_offset1 =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_offset1 =
                pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET);
-       pm8001_ha->main_cfg_tbl.fatal_err_dump_length1 =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_length1 =
                pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH);
 }
 
@@ -86,31 +93,56 @@ static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
 static void read_general_status_table(struct pm8001_hba_info *pm8001_ha)
 {
        void __iomem *address = pm8001_ha->general_stat_tbl_addr;
-       pm8001_ha->gs_tbl.gst_len_mpistate      = pm8001_mr32(address, 0x00);
-       pm8001_ha->gs_tbl.iq_freeze_state0      = pm8001_mr32(address, 0x04);
-       pm8001_ha->gs_tbl.iq_freeze_state1      = pm8001_mr32(address, 0x08);
-       pm8001_ha->gs_tbl.msgu_tcnt             = pm8001_mr32(address, 0x0C);
-       pm8001_ha->gs_tbl.iop_tcnt              = pm8001_mr32(address, 0x10);
-       pm8001_ha->gs_tbl.reserved              = pm8001_mr32(address, 0x14);
-       pm8001_ha->gs_tbl.phy_state[0]  = pm8001_mr32(address, 0x18);
-       pm8001_ha->gs_tbl.phy_state[1]  = pm8001_mr32(address, 0x1C);
-       pm8001_ha->gs_tbl.phy_state[2]  = pm8001_mr32(address, 0x20);
-       pm8001_ha->gs_tbl.phy_state[3]  = pm8001_mr32(address, 0x24);
-       pm8001_ha->gs_tbl.phy_state[4]  = pm8001_mr32(address, 0x28);
-       pm8001_ha->gs_tbl.phy_state[5]  = pm8001_mr32(address, 0x2C);
-       pm8001_ha->gs_tbl.phy_state[6]  = pm8001_mr32(address, 0x30);
-       pm8001_ha->gs_tbl.phy_state[7]  = pm8001_mr32(address, 0x34);
-       pm8001_ha->gs_tbl.reserved1             = pm8001_mr32(address, 0x38);
-       pm8001_ha->gs_tbl.reserved2             = pm8001_mr32(address, 0x3C);
-       pm8001_ha->gs_tbl.reserved3             = pm8001_mr32(address, 0x40);
-       pm8001_ha->gs_tbl.recover_err_info[0]   = pm8001_mr32(address, 0x44);
-       pm8001_ha->gs_tbl.recover_err_info[1]   = pm8001_mr32(address, 0x48);
-       pm8001_ha->gs_tbl.recover_err_info[2]   = pm8001_mr32(address, 0x4C);
-       pm8001_ha->gs_tbl.recover_err_info[3]   = pm8001_mr32(address, 0x50);
-       pm8001_ha->gs_tbl.recover_err_info[4]   = pm8001_mr32(address, 0x54);
-       pm8001_ha->gs_tbl.recover_err_info[5]   = pm8001_mr32(address, 0x58);
-       pm8001_ha->gs_tbl.recover_err_info[6]   = pm8001_mr32(address, 0x5C);
-       pm8001_ha->gs_tbl.recover_err_info[7]   = pm8001_mr32(address, 0x60);
+       pm8001_ha->gs_tbl.pm8001_tbl.gst_len_mpistate   =
+                               pm8001_mr32(address, 0x00);
+       pm8001_ha->gs_tbl.pm8001_tbl.iq_freeze_state0   =
+                               pm8001_mr32(address, 0x04);
+       pm8001_ha->gs_tbl.pm8001_tbl.iq_freeze_state1   =
+                               pm8001_mr32(address, 0x08);
+       pm8001_ha->gs_tbl.pm8001_tbl.msgu_tcnt          =
+                               pm8001_mr32(address, 0x0C);
+       pm8001_ha->gs_tbl.pm8001_tbl.iop_tcnt           =
+                               pm8001_mr32(address, 0x10);
+       pm8001_ha->gs_tbl.pm8001_tbl.rsvd               =
+                               pm8001_mr32(address, 0x14);
+       pm8001_ha->gs_tbl.pm8001_tbl.phy_state[0]       =
+                               pm8001_mr32(address, 0x18);
+       pm8001_ha->gs_tbl.pm8001_tbl.phy_state[1]       =
+                               pm8001_mr32(address, 0x1C);
+       pm8001_ha->gs_tbl.pm8001_tbl.phy_state[2]       =
+                               pm8001_mr32(address, 0x20);
+       pm8001_ha->gs_tbl.pm8001_tbl.phy_state[3]       =
+                               pm8001_mr32(address, 0x24);
+       pm8001_ha->gs_tbl.pm8001_tbl.phy_state[4]       =
+                               pm8001_mr32(address, 0x28);
+       pm8001_ha->gs_tbl.pm8001_tbl.phy_state[5]       =
+                               pm8001_mr32(address, 0x2C);
+       pm8001_ha->gs_tbl.pm8001_tbl.phy_state[6]       =
+                               pm8001_mr32(address, 0x30);
+       pm8001_ha->gs_tbl.pm8001_tbl.phy_state[7]       =
+                               pm8001_mr32(address, 0x34);
+       pm8001_ha->gs_tbl.pm8001_tbl.gpio_input_val     =
+                               pm8001_mr32(address, 0x38);
+       pm8001_ha->gs_tbl.pm8001_tbl.rsvd1[0]           =
+                               pm8001_mr32(address, 0x3C);
+       pm8001_ha->gs_tbl.pm8001_tbl.rsvd1[1]           =
+                               pm8001_mr32(address, 0x40);
+       pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[0]        =
+                               pm8001_mr32(address, 0x44);
+       pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[1]        =
+                               pm8001_mr32(address, 0x48);
+       pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[2]        =
+                               pm8001_mr32(address, 0x4C);
+       pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[3]        =
+                               pm8001_mr32(address, 0x50);
+       pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[4]        =
+                               pm8001_mr32(address, 0x54);
+       pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[5]        =
+                               pm8001_mr32(address, 0x58);
+       pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[6]        =
+                               pm8001_mr32(address, 0x5C);
+       pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[7]        =
+                               pm8001_mr32(address, 0x60);
 }
 
 /**
@@ -119,10 +151,9 @@ static void read_general_status_table(struct pm8001_hba_info *pm8001_ha)
  */
 static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
 {
-       int inbQ_num = 1;
        int i;
        void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
-       for (i = 0; i < inbQ_num; i++) {
+       for (i = 0; i < PM8001_MAX_INB_NUM; i++) {
                u32 offset = i * 0x20;
                pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
                      get_pci_bar_index(pm8001_mr32(address, (offset + 0x14)));
@@ -137,10 +168,9 @@ static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
  */
 static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
 {
-       int outbQ_num = 1;
        int i;
        void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
-       for (i = 0; i < outbQ_num; i++) {
+       for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) {
                u32 offset = i * 0x24;
                pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
                      get_pci_bar_index(pm8001_mr32(address, (offset + 0x14)));
@@ -155,54 +185,57 @@ static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
  */
 static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
 {
-       int qn = 1;
        int i;
        u32 offsetib, offsetob;
        void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr;
        void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr;
 
-       pm8001_ha->main_cfg_tbl.inbound_q_nppd_hppd                     = 0;
-       pm8001_ha->main_cfg_tbl.outbound_hw_event_pid0_3                = 0;
-       pm8001_ha->main_cfg_tbl.outbound_hw_event_pid4_7                = 0;
-       pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid0_3               = 0;
-       pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid4_7               = 0;
-       pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid0_3       = 0;
-       pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid4_7       = 0;
-       pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid0_3   = 0;
-       pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid4_7   = 0;
-       pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid0_3   = 0;
-       pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid4_7   = 0;
-
-       pm8001_ha->main_cfg_tbl.upper_event_log_addr            =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_q_nppd_hppd          = 0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid0_3     = 0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid4_7     = 0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid0_3    = 0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid4_7    = 0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ITNexus_event_pid0_3 =
+                                                                        0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ITNexus_event_pid4_7 =
+                                                                        0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ssp_event_pid0_3 = 0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ssp_event_pid4_7 = 0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_smp_event_pid0_3 = 0;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_smp_event_pid4_7 = 0;
+
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_event_log_addr         =
                pm8001_ha->memoryMap.region[AAP1].phys_addr_hi;
-       pm8001_ha->main_cfg_tbl.lower_event_log_addr            =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_event_log_addr         =
                pm8001_ha->memoryMap.region[AAP1].phys_addr_lo;
-       pm8001_ha->main_cfg_tbl.event_log_size  = PM8001_EVENT_LOG_SIZE;
-       pm8001_ha->main_cfg_tbl.event_log_option                = 0x01;
-       pm8001_ha->main_cfg_tbl.upper_iop_event_log_addr        =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_size               =
+               PM8001_EVENT_LOG_SIZE;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_option             = 0x01;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_iop_event_log_addr     =
                pm8001_ha->memoryMap.region[IOP].phys_addr_hi;
-       pm8001_ha->main_cfg_tbl.lower_iop_event_log_addr        =
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_iop_event_log_addr     =
                pm8001_ha->memoryMap.region[IOP].phys_addr_lo;
-       pm8001_ha->main_cfg_tbl.iop_event_log_size      = PM8001_EVENT_LOG_SIZE;
-       pm8001_ha->main_cfg_tbl.iop_event_log_option            = 0x01;
-       pm8001_ha->main_cfg_tbl.fatal_err_interrupt             = 0x01;
-       for (i = 0; i < qn; i++) {
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_size           =
+               PM8001_EVENT_LOG_SIZE;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option         = 0x01;
+       pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt          = 0x01;
+       for (i = 0; i < PM8001_MAX_INB_NUM; i++) {
                pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt  =
                        PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30);
                pm8001_ha->inbnd_q_tbl[i].upper_base_addr       =
-                       pm8001_ha->memoryMap.region[IB].phys_addr_hi;
+                       pm8001_ha->memoryMap.region[IB + i].phys_addr_hi;
                pm8001_ha->inbnd_q_tbl[i].lower_base_addr       =
-               pm8001_ha->memoryMap.region[IB].phys_addr_lo;
+               pm8001_ha->memoryMap.region[IB + i].phys_addr_lo;
                pm8001_ha->inbnd_q_tbl[i].base_virt             =
-                       (u8 *)pm8001_ha->memoryMap.region[IB].virt_ptr;
+                       (u8 *)pm8001_ha->memoryMap.region[IB + i].virt_ptr;
                pm8001_ha->inbnd_q_tbl[i].total_length          =
-                       pm8001_ha->memoryMap.region[IB].total_len;
+                       pm8001_ha->memoryMap.region[IB + i].total_len;
                pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr    =
-                       pm8001_ha->memoryMap.region[CI].phys_addr_hi;
+                       pm8001_ha->memoryMap.region[CI + i].phys_addr_hi;
                pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr    =
-                       pm8001_ha->memoryMap.region[CI].phys_addr_lo;
+                       pm8001_ha->memoryMap.region[CI + i].phys_addr_lo;
                pm8001_ha->inbnd_q_tbl[i].ci_virt               =
-                       pm8001_ha->memoryMap.region[CI].virt_ptr;
+                       pm8001_ha->memoryMap.region[CI + i].virt_ptr;
                offsetib = i * 0x20;
                pm8001_ha->inbnd_q_tbl[i].pi_pci_bar            =
                        get_pci_bar_index(pm8001_mr32(addressib,
@@ -212,25 +245,25 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
                pm8001_ha->inbnd_q_tbl[i].producer_idx          = 0;
                pm8001_ha->inbnd_q_tbl[i].consumer_index        = 0;
        }
-       for (i = 0; i < qn; i++) {
+       for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) {
                pm8001_ha->outbnd_q_tbl[i].element_size_cnt     =
                        PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30);
                pm8001_ha->outbnd_q_tbl[i].upper_base_addr      =
-                       pm8001_ha->memoryMap.region[OB].phys_addr_hi;
+                       pm8001_ha->memoryMap.region[OB + i].phys_addr_hi;
                pm8001_ha->outbnd_q_tbl[i].lower_base_addr      =
-                       pm8001_ha->memoryMap.region[OB].phys_addr_lo;
+                       pm8001_ha->memoryMap.region[OB + i].phys_addr_lo;
                pm8001_ha->outbnd_q_tbl[i].base_virt            =
-                       (u8 *)pm8001_ha->memoryMap.region[OB].virt_ptr;
+                       (u8 *)pm8001_ha->memoryMap.region[OB + i].virt_ptr;
                pm8001_ha->outbnd_q_tbl[i].total_length         =
-                       pm8001_ha->memoryMap.region[OB].total_len;
+                       pm8001_ha->memoryMap.region[OB + i].total_len;
                pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr   =
-                       pm8001_ha->memoryMap.region[PI].phys_addr_hi;
+                       pm8001_ha->memoryMap.region[PI + i].phys_addr_hi;
                pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr   =
-                       pm8001_ha->memoryMap.region[PI].phys_addr_lo;
+                       pm8001_ha->memoryMap.region[PI + i].phys_addr_lo;
                pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay       =
-                       0 | (10 << 16) | (0 << 24);
+                       0 | (10 << 16) | (i << 24);
                pm8001_ha->outbnd_q_tbl[i].pi_virt              =
-                       pm8001_ha->memoryMap.region[PI].virt_ptr;
+                       pm8001_ha->memoryMap.region[PI + i].virt_ptr;
                offsetob = i * 0x24;
                pm8001_ha->outbnd_q_tbl[i].ci_pci_bar           =
                        get_pci_bar_index(pm8001_mr32(addressob,
@@ -250,42 +283,51 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
 {
        void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
        pm8001_mw32(address, 0x24,
-               pm8001_ha->main_cfg_tbl.inbound_q_nppd_hppd);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_q_nppd_hppd);
        pm8001_mw32(address, 0x28,
-               pm8001_ha->main_cfg_tbl.outbound_hw_event_pid0_3);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid0_3);
        pm8001_mw32(address, 0x2C,
-               pm8001_ha->main_cfg_tbl.outbound_hw_event_pid4_7);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid4_7);
        pm8001_mw32(address, 0x30,
-               pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid0_3);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid0_3);
        pm8001_mw32(address, 0x34,
-               pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid4_7);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid4_7);
        pm8001_mw32(address, 0x38,
-               pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid0_3);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.
+                                       outbound_tgt_ITNexus_event_pid0_3);
        pm8001_mw32(address, 0x3C,
-               pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid4_7);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.
+                                       outbound_tgt_ITNexus_event_pid4_7);
        pm8001_mw32(address, 0x40,
-               pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid0_3);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.
+                                       outbound_tgt_ssp_event_pid0_3);
        pm8001_mw32(address, 0x44,
-               pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid4_7);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.
+                                       outbound_tgt_ssp_event_pid4_7);
        pm8001_mw32(address, 0x48,
-               pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid0_3);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.
+                                       outbound_tgt_smp_event_pid0_3);
        pm8001_mw32(address, 0x4C,
-               pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid4_7);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.
+                                       outbound_tgt_smp_event_pid4_7);
        pm8001_mw32(address, 0x50,
-               pm8001_ha->main_cfg_tbl.upper_event_log_addr);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_event_log_addr);
        pm8001_mw32(address, 0x54,
-               pm8001_ha->main_cfg_tbl.lower_event_log_addr);
-       pm8001_mw32(address, 0x58, pm8001_ha->main_cfg_tbl.event_log_size);
-       pm8001_mw32(address, 0x5C, pm8001_ha->main_cfg_tbl.event_log_option);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_event_log_addr);
+       pm8001_mw32(address, 0x58,
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_size);
+       pm8001_mw32(address, 0x5C,
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_option);
        pm8001_mw32(address, 0x60,
-               pm8001_ha->main_cfg_tbl.upper_iop_event_log_addr);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_iop_event_log_addr);
        pm8001_mw32(address, 0x64,
-               pm8001_ha->main_cfg_tbl.lower_iop_event_log_addr);
-       pm8001_mw32(address, 0x68, pm8001_ha->main_cfg_tbl.iop_event_log_size);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_iop_event_log_addr);
+       pm8001_mw32(address, 0x68,
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_size);
        pm8001_mw32(address, 0x6C,
-               pm8001_ha->main_cfg_tbl.iop_event_log_option);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option);
        pm8001_mw32(address, 0x70,
-               pm8001_ha->main_cfg_tbl.fatal_err_interrupt);
+               pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt);
 }
 
 /**
@@ -597,6 +639,19 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
  */
 static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
 {
+       u8 i = 0;
+       u16 deviceid;
+       pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
+       /* 8081 controllers need BAR shift to access MPI space
+       * as this is shared with BIOS data */
+       if (deviceid == 0x8081) {
+               if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) {
+                       PM8001_FAIL_DBG(pm8001_ha,
+                               pm8001_printk("Shift Bar4 to 0x%x failed\n",
+                                       GSM_SM_BASE));
+                       return -1;
+               }
+       }
        /* check the firmware status */
        if (-1 == check_fw_ready(pm8001_ha)) {
                PM8001_FAIL_DBG(pm8001_ha,
@@ -613,11 +668,16 @@ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
        read_outbnd_queue_table(pm8001_ha);
        /* update main config table ,inbound table and outbound table */
        update_main_config_table(pm8001_ha);
-       update_inbnd_queue_table(pm8001_ha, 0);
-       update_outbnd_queue_table(pm8001_ha, 0);
-       mpi_set_phys_g3_with_ssc(pm8001_ha, 0);
-       /* 7->130ms, 34->500ms, 119->1.5s */
-       mpi_set_open_retry_interval_reg(pm8001_ha, 119);
+       for (i = 0; i < PM8001_MAX_INB_NUM; i++)
+               update_inbnd_queue_table(pm8001_ha, i);
+       for (i = 0; i < PM8001_MAX_OUTB_NUM; i++)
+               update_outbnd_queue_table(pm8001_ha, i);
+       /* 8081 controller donot require these operations */
+       if (deviceid != 0x8081) {
+               mpi_set_phys_g3_with_ssc(pm8001_ha, 0);
+               /* 7->130ms, 34->500ms, 119->1.5s */
+               mpi_set_open_retry_interval_reg(pm8001_ha, 119);
+       }
        /* notify firmware update finished and check initialization status */
        if (0 == mpi_init_check(pm8001_ha)) {
                PM8001_INIT_DBG(pm8001_ha,
@@ -639,6 +699,16 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
        u32 max_wait_count;
        u32 value;
        u32 gst_len_mpistate;
+       u16 deviceid;
+       pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
+       if (deviceid == 0x8081) {
+               if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) {
+                       PM8001_FAIL_DBG(pm8001_ha,
+                               pm8001_printk("Shift Bar4 to 0x%x failed\n",
+                                       GSM_SM_BASE));
+                       return -1;
+               }
+       }
        init_pci_device_addresses(pm8001_ha);
        /* Write bit1=1 to Inbound DoorBell Register to tell the SPC FW the
        table is stop */
@@ -740,14 +810,14 @@ static u32 soft_reset_ready_check(struct pm8001_hba_info *pm8001_ha)
  * pm8001_chip_soft_rst - soft reset the PM8001 chip, so that the clear all
  * the FW register status to the originated status.
  * @pm8001_ha: our hba card information
- * @signature: signature in host scratch pad0 register.
  */
 static int
-pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
+pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
 {
        u32     regVal, toggleVal;
        u32     max_wait_count;
        u32     regVal1, regVal2, regVal3;
+       u32     signature = 0x252acbcd; /* for host scratch pad0 */
        unsigned long flags;
 
        /* step1: Check FW is ready for soft reset */
@@ -1113,7 +1183,7 @@ static void pm8001_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
  * pm8001_chip_iounmap - which maped when initialized.
  * @pm8001_ha: our hba card information
  */
-static void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha)
+void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha)
 {
        s8 bar, logical = 0;
        for (bar = 0; bar < 6; bar++) {
@@ -1192,7 +1262,7 @@ pm8001_chip_msix_interrupt_disable(struct pm8001_hba_info *pm8001_ha,
  * @pm8001_ha: our hba card information
  */
 static void
-pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
+pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
 {
 #ifdef PM8001_USE_MSIX
        pm8001_chip_msix_interrupt_enable(pm8001_ha, 0);
@@ -1207,7 +1277,7 @@ pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
  * @pm8001_ha: our hba card information
  */
 static void
-pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
+pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
 {
 #ifdef PM8001_USE_MSIX
        pm8001_chip_msix_interrupt_disable(pm8001_ha, 0);
@@ -1218,12 +1288,13 @@ pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
 }
 
 /**
- * mpi_msg_free_get- get the free message buffer for transfer inbound queue.
+ * pm8001_mpi_msg_free_get - get the free message buffer for transfer
+ * inbound queue.
  * @circularQ: the inbound queue  we want to transfer to HBA.
  * @messageSize: the message size of this transfer, normally it is 64 bytes
  * @messagePtr: the pointer to message.
  */
-static int mpi_msg_free_get(struct inbound_queue_table *circularQ,
+int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ,
                            u16 messageSize, void **messagePtr)
 {
        u32 offset, consumer_index;
@@ -1231,7 +1302,7 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ,
        u8 bcCount = 1; /* only support single buffer */
 
        /* Checks is the requested message size can be allocated in this queue*/
-       if (messageSize > 64) {
+       if (messageSize > IOMB_SIZE_SPCV) {
                *messagePtr = NULL;
                return -1;
        }
@@ -1245,7 +1316,7 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ,
                return -1;
        }
        /* get memory IOMB buffer address */
-       offset = circularQ->producer_idx * 64;
+       offset = circularQ->producer_idx * messageSize;
        /* increment to next bcCount element */
        circularQ->producer_idx = (circularQ->producer_idx + bcCount)
                                % PM8001_MPI_QUEUE;
@@ -1257,29 +1328,30 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ,
 }
 
 /**
- * mpi_build_cmd- build the message queue for transfer, update the PI to FW
- * to tell the fw to get this message from IOMB.
+ * pm8001_mpi_build_cmd- build the message queue for transfer, update the PI to
+ * FW to tell the fw to get this message from IOMB.
  * @pm8001_ha: our hba card information
  * @circularQ: the inbound queue we want to transfer to HBA.
  * @opCode: the operation code represents commands which LLDD and fw recognized.
  * @payload: the command payload of each operation command.
  */
-static int mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
+int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
                         struct inbound_queue_table *circularQ,
-                        u32 opCode, void *payload)
+                        u32 opCode, void *payload, u32 responseQueue)
 {
        u32 Header = 0, hpriority = 0, bc = 1, category = 0x02;
-       u32 responseQueue = 0;
        void *pMessage;
 
-       if (mpi_msg_free_get(circularQ, 64, &pMessage) < 0) {
+       if (pm8001_mpi_msg_free_get(circularQ, pm8001_ha->iomb_size,
+               &pMessage) < 0) {
                PM8001_IO_DBG(pm8001_ha,
                        pm8001_printk("No free mpi buffer\n"));
                return -1;
        }
        BUG_ON(!payload);
        /*Copy to the payload*/
-       memcpy(pMessage, payload, (64 - sizeof(struct mpi_msg_hdr)));
+       memcpy(pMessage, payload, (pm8001_ha->iomb_size -
+                               sizeof(struct mpi_msg_hdr)));
 
        /*Build the header*/
        Header = ((1 << 31) | (hpriority << 30) | ((bc & 0x1f) << 24)
@@ -1291,12 +1363,13 @@ static int mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
        pm8001_cw32(pm8001_ha, circularQ->pi_pci_bar,
                circularQ->pi_offset, circularQ->producer_idx);
        PM8001_IO_DBG(pm8001_ha,
-               pm8001_printk("after PI= %d CI= %d\n", circularQ->producer_idx,
-               circularQ->consumer_index));
+               pm8001_printk("INB Q %x OPCODE:%x , UPDATED PI=%d CI=%d\n",
+                       responseQueue, opCode, circularQ->producer_idx,
+                       circularQ->consumer_index));
        return 0;
 }
 
-static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
+u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
                            struct outbound_queue_table *circularQ, u8 bc)
 {
        u32 producer_index;
@@ -1305,7 +1378,7 @@ static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
 
        msgHeader = (struct mpi_msg_hdr *)(pMsg - sizeof(struct mpi_msg_hdr));
        pOutBoundMsgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt +
-                               circularQ->consumer_idx * 64);
+                               circularQ->consumer_idx * pm8001_ha->iomb_size);
        if (pOutBoundMsgHeader != msgHeader) {
                PM8001_FAIL_DBG(pm8001_ha,
                        pm8001_printk("consumer_idx = %d msgHeader = %p\n",
@@ -1336,13 +1409,14 @@ static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
 }
 
 /**
- * mpi_msg_consume- get the MPI message from  outbound queue message table.
+ * pm8001_mpi_msg_consume- get the MPI message from outbound queue
+ * message table.
  * @pm8001_ha: our hba card information
  * @circularQ: the outbound queue  table.
  * @messagePtr1: the message contents of this outbound message.
  * @pBC: the message size.
  */
-static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
+u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
                           struct outbound_queue_table *circularQ,
                           void **messagePtr1, u8 *pBC)
 {
@@ -1356,7 +1430,7 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
                        /*Get the pointer to the circular queue buffer element*/
                        msgHeader = (struct mpi_msg_hdr *)
                                (circularQ->base_virt +
-                               circularQ->consumer_idx * 64);
+                               circularQ->consumer_idx * pm8001_ha->iomb_size);
                        /* read header */
                        header_tmp = pm8001_read_32(msgHeader);
                        msgHeader_tmp = cpu_to_le32(header_tmp);
@@ -1416,7 +1490,7 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
        return MPI_IO_STATUS_BUSY;
 }
 
-static void pm8001_work_fn(struct work_struct *work)
+void pm8001_work_fn(struct work_struct *work)
 {
        struct pm8001_work *pw = container_of(work, struct pm8001_work, work);
        struct pm8001_device *pm8001_dev;
@@ -1431,7 +1505,7 @@ static void pm8001_work_fn(struct work_struct *work)
        pm8001_dev = pw->data; /* Most stash device structure */
        if ((pm8001_dev == NULL)
         || ((pw->handler != IO_XFER_ERROR_BREAK)
-         && (pm8001_dev->dev_type == NO_DEVICE))) {
+         && (pm8001_dev->dev_type == SAS_PHY_UNUSED))) {
                kfree(pw);
                return;
        }
@@ -1596,7 +1670,7 @@ static void pm8001_work_fn(struct work_struct *work)
        }       break;
        case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
                dev = pm8001_dev->sas_device;
-               pm8001_I_T_nexus_reset(dev);
+               pm8001_I_T_nexus_event_handler(dev);
                break;
        case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
                dev = pm8001_dev->sas_device;
@@ -1614,7 +1688,7 @@ static void pm8001_work_fn(struct work_struct *work)
        kfree(pw);
 }
 
-static int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data,
+int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data,
                               int handler)
 {
        struct pm8001_work *pw;
@@ -1633,6 +1707,123 @@ static int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data,
        return ret;
 }
 
+static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha,
+               struct pm8001_device *pm8001_ha_dev)
+{
+       int res;
+       u32 ccb_tag;
+       struct pm8001_ccb_info *ccb;
+       struct sas_task *task = NULL;
+       struct task_abort_req task_abort;
+       struct inbound_queue_table *circularQ;
+       u32 opc = OPC_INB_SATA_ABORT;
+       int ret;
+
+       if (!pm8001_ha_dev) {
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("dev is null\n"));
+               return;
+       }
+
+       task = sas_alloc_slow_task(GFP_ATOMIC);
+
+       if (!task) {
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("cannot "
+                                               "allocate task\n"));
+               return;
+       }
+
+       task->task_done = pm8001_task_done;
+
+       res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
+       if (res)
+               return;
+
+       ccb = &pm8001_ha->ccb_info[ccb_tag];
+       ccb->device = pm8001_ha_dev;
+       ccb->ccb_tag = ccb_tag;
+       ccb->task = task;
+
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+       memset(&task_abort, 0, sizeof(task_abort));
+       task_abort.abort_all = cpu_to_le32(1);
+       task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
+       task_abort.tag = cpu_to_le32(ccb_tag);
+
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
+
+}
+
+static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha,
+               struct pm8001_device *pm8001_ha_dev)
+{
+       struct sata_start_req sata_cmd;
+       int res;
+       u32 ccb_tag;
+       struct pm8001_ccb_info *ccb;
+       struct sas_task *task = NULL;
+       struct host_to_dev_fis fis;
+       struct domain_device *dev;
+       struct inbound_queue_table *circularQ;
+       u32 opc = OPC_INB_SATA_HOST_OPSTART;
+
+       task = sas_alloc_slow_task(GFP_ATOMIC);
+
+       if (!task) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("cannot allocate task !!!\n"));
+               return;
+       }
+       task->task_done = pm8001_task_done;
+
+       res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
+       if (res) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("cannot allocate tag !!!\n"));
+               return;
+       }
+
+       /* allocate domain device by ourselves as libsas
+        * is not going to provide any
+        */
+       dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC);
+       if (!dev) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("Domain device cannot be allocated\n"));
+               sas_free_task(task);
+               return;
+       } else {
+               task->dev = dev;
+               task->dev->lldd_dev = pm8001_ha_dev;
+       }
+
+       ccb = &pm8001_ha->ccb_info[ccb_tag];
+       ccb->device = pm8001_ha_dev;
+       ccb->ccb_tag = ccb_tag;
+       ccb->task = task;
+       pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG;
+       pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG;
+
+       memset(&sata_cmd, 0, sizeof(sata_cmd));
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+       /* construct read log FIS */
+       memset(&fis, 0, sizeof(struct host_to_dev_fis));
+       fis.fis_type = 0x27;
+       fis.flags = 0x80;
+       fis.command = ATA_CMD_READ_LOG_EXT;
+       fis.lbal = 0x10;
+       fis.sector_count = 0x1;
+
+       sata_cmd.tag = cpu_to_le32(ccb_tag);
+       sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
+       sata_cmd.ncqtag_atap_dir_m |= ((0x1 << 7) | (0x5 << 9));
+       memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
+
+       res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
+
+}
+
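pm8001_send_read_log() above hand-builds a host-to-device register FIS for READ LOG EXT (log page 0x10, one sector) so the drive's NCQ error log can be fetched after a failed queued command. A standalone sketch of just that FIS setup; the struct below is a simplified stand-in for the kernel's struct host_to_dev_fis, so the exact field layout is an assumption:

#include <stdint.h>
#include <string.h>

/* Simplified stand-in for struct host_to_dev_fis (layout assumed). */
struct h2d_fis {
        uint8_t fis_type;           /* 0x27 = register FIS, host to device */
        uint8_t flags;              /* bit 7 = update the command register */
        uint8_t command;            /* ATA command opcode                  */
        uint8_t features;
        uint8_t lbal, lbam, lbah, device;
        uint8_t lbal_exp, lbam_exp, lbah_exp, features_exp;
        uint8_t sector_count, sector_count_exp;
        uint8_t reserved[2];
};

#define ATA_CMD_READ_LOG_EXT 0x2F

static void build_read_log_ext_fis(struct h2d_fis *fis)
{
        memset(fis, 0, sizeof(*fis));
        fis->fis_type     = 0x27;                  /* register H2D FIS       */
        fis->flags        = 0x80;                  /* command register update */
        fis->command      = ATA_CMD_READ_LOG_EXT;  /* 0x2F                    */
        fis->lbal         = 0x10;                  /* NCQ command error log   */
        fis->sector_count = 0x01;                  /* one 512-byte log sector */
}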
 /**
  * mpi_ssp_completion- process the event that FW response to the SSP request.
  * @pm8001_ha: our hba card information
@@ -1867,7 +2058,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
                break;
        }
        PM8001_IO_DBG(pm8001_ha,
-               pm8001_printk("scsi_status = %x \n ",
+               pm8001_printk("scsi_status = %x\n ",
                psspPayload->ssp_resp_iu.status));
        spin_lock_irqsave(&t->task_state_lock, flags);
        t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
@@ -2096,16 +2287,44 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
        status = le32_to_cpu(psataPayload->status);
        tag = le32_to_cpu(psataPayload->tag);
 
+       if (!tag) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("tag null\n"));
+               return;
+       }
        ccb = &pm8001_ha->ccb_info[tag];
        param = le32_to_cpu(psataPayload->param);
-       t = ccb->task;
+       if (ccb) {
+               t = ccb->task;
+               pm8001_dev = ccb->device;
+       } else {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("ccb null\n"));
+               return;
+       }
+
+       if (t) {
+               if (t->dev && (t->dev->lldd_dev))
+                       pm8001_dev = t->dev->lldd_dev;
+       } else {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("task null\n"));
+               return;
+       }
+
+       if ((pm8001_dev && !(pm8001_dev->id & NCQ_READ_LOG_FLAG))
+               && unlikely(!t || !t->lldd_task || !t->dev)) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("task or dev null\n"));
+               return;
+       }
+
        ts = &t->task_status;
-       pm8001_dev = ccb->device;
-       if (status)
+       if (!ts) {
                PM8001_FAIL_DBG(pm8001_ha,
-                       pm8001_printk("sata IO status 0x%x\n", status));
-       if (unlikely(!t || !t->lldd_task || !t->dev))
+                       pm8001_printk("ts null\n"));
                return;
+       }
 
        switch (status) {
        case IO_SUCCESS:
@@ -2113,6 +2332,19 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
                if (param == 0) {
                        ts->resp = SAS_TASK_COMPLETE;
                        ts->stat = SAM_STAT_GOOD;
+                       /* check if response is for SEND READ LOG */
+                       if (pm8001_dev &&
+                               (pm8001_dev->id & NCQ_READ_LOG_FLAG)) {
+                               /* set new bit for abort_all */
+                               pm8001_dev->id |= NCQ_ABORT_ALL_FLAG;
+                               /* clear bit for read log */
+                               pm8001_dev->id = pm8001_dev->id & 0x7FFFFFFF;
+                               pm8001_send_abort_all(pm8001_ha, pm8001_dev);
+                               /* Free the tag */
+                               pm8001_tag_free(pm8001_ha, tag);
+                               sas_free_task(t);
+                               return;
+                       }
                } else {
                        u8 len;
                        ts->resp = SAS_TASK_COMPLETE;
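The NCQ recovery state is tracked in the top bits of pm8001_dev->id: the clear masks used in these paths (0x7FFFFFFF, 0xBFFFFFFF, 0xDFFFFFFF) imply bit 31 for the read-log-pending flag, bit 30 for abort-all and bit 29 for the second read-log pass. A small sketch of those transitions; the bit positions are inferred from the masks rather than taken from the header:

#include <stdint.h>

/* Bit positions inferred from the clear masks in the completion paths. */
#define NCQ_READ_LOG_FLAG   (1u << 31)   /* cleared with id &= 0x7FFFFFFF */
#define NCQ_ABORT_ALL_FLAG  (1u << 30)   /* cleared with id &= 0xBFFFFFFF */
#define NCQ_2ND_RLE_FLAG    (1u << 29)   /* cleared with id &= 0xDFFFFFFF */

/* READ LOG EXT completed: switch the device from "read log pending"
 * to "abort all outstanding NCQ commands". */
static void on_read_log_complete(uint32_t *dev_id)
{
        *dev_id |= NCQ_ABORT_ALL_FLAG;
        *dev_id &= ~NCQ_READ_LOG_FLAG;   /* same effect as id &= 0x7FFFFFFF */
}

/* Abort-all response seen: recovery is done, drop the abort-all marker. */
static void on_abort_all_complete(uint32_t *dev_id)
{
        *dev_id &= ~NCQ_ABORT_ALL_FLAG;  /* same effect as id &= 0xBFFFFFFF */
}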
@@ -2423,6 +2655,29 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
        u32 dev_id = le32_to_cpu(psataPayload->device_id);
        unsigned long flags;
 
+       ccb = &pm8001_ha->ccb_info[tag];
+
+       if (ccb) {
+               t = ccb->task;
+               pm8001_dev = ccb->device;
+       } else {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("No CCB !!!. returning\n"));
+       }
+       if (event)
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("SATA EVENT 0x%x\n", event));
+
+       /* Check if this is NCQ error */
+       if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) {
+               /* find device using device id */
+               pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id);
+               /* send read log extension */
+               if (pm8001_dev)
+                       pm8001_send_read_log(pm8001_ha, pm8001_dev);
+               return;
+       }
+
        ccb = &pm8001_ha->ccb_info[tag];
        t = ccb->task;
        pm8001_dev = ccb->device;
@@ -2432,9 +2687,9 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
        if (unlikely(!t || !t->lldd_task || !t->dev))
                return;
        ts = &t->task_status;
-       PM8001_IO_DBG(pm8001_ha,
-               pm8001_printk("port_id = %x,device_id = %x\n",
-               port_id, dev_id));
+       PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+               "port_id:0x%x, device_id:0x%x, tag:0x%x, event:0x%x\n",
+               port_id, dev_id, tag, event));
        switch (event) {
        case IO_OVERFLOW:
                PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n"));
@@ -2822,8 +3077,8 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
        }
 }
 
-static void
-mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha,
+               void *piomb)
 {
        struct set_dev_state_resp *pPayload =
                (struct set_dev_state_resp *)(piomb + 4);
@@ -2843,8 +3098,7 @@ mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
        pm8001_ccb_free(pm8001_ha, tag);
 }
 
-static void
-mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
        struct get_nvm_data_resp *pPayload =
                (struct get_nvm_data_resp *)(piomb + 4);
@@ -2863,8 +3117,8 @@ mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
        pm8001_ccb_free(pm8001_ha, tag);
 }
 
-static void
-mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+void
+pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
        struct fw_control_ex    *fw_control_context;
        struct get_nvm_data_resp *pPayload =
@@ -2925,7 +3179,7 @@ mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
        pm8001_ccb_free(pm8001_ha, tag);
 }
 
-static int mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
+int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
        struct local_phy_ctl_resp *pPayload =
                (struct local_phy_ctl_resp *)(piomb + 4);
@@ -2954,7 +3208,7 @@ static int mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
  * while receive a broadcast(change) primitive just tell the sas
  * layer to discover the changed domain rather than the whole domain.
  */
-static void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i)
+void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i)
 {
        struct pm8001_phy *phy = &pm8001_ha->phy[i];
        struct asd_sas_phy *sas_phy = &phy->sas_phy;
@@ -2988,7 +3242,7 @@ static void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i)
 }
 
 /* Get the link rate speed  */
-static void get_lrate_mode(struct pm8001_phy *phy, u8 link_rate)
+void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate)
 {
        struct sas_phy *sas_phy = phy->sas_phy.phy;
 
@@ -3025,7 +3279,7 @@ static void get_lrate_mode(struct pm8001_phy *phy, u8 link_rate)
  * LOCKING: the frame_rcvd_lock needs to be held since this parses the frame
  * buffer.
  */
-static void pm8001_get_attached_sas_addr(struct pm8001_phy *phy,
+void pm8001_get_attached_sas_addr(struct pm8001_phy *phy,
        u8 *sas_addr)
 {
        if (phy->sas_phy.frame_rcvd[0] == 0x34
@@ -3067,7 +3321,7 @@ static void pm8001_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
                ((phyId & 0x0F) << 4) | (port_id & 0x0F));
        payload.param0 = cpu_to_le32(param0);
        payload.param1 = cpu_to_le32(param1);
-       mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+       pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
 }
 
 static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
@@ -3112,19 +3366,19 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
                pm8001_chip_phy_ctl_req(pm8001_ha, phy_id,
                        PHY_NOTIFY_ENABLE_SPINUP);
                port->port_attached = 1;
-               get_lrate_mode(phy, link_rate);
+               pm8001_get_lrate_mode(phy, link_rate);
                break;
        case SAS_EDGE_EXPANDER_DEVICE:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("expander device.\n"));
                port->port_attached = 1;
-               get_lrate_mode(phy, link_rate);
+               pm8001_get_lrate_mode(phy, link_rate);
                break;
        case SAS_FANOUT_EXPANDER_DEVICE:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("fanout expander device.\n"));
                port->port_attached = 1;
-               get_lrate_mode(phy, link_rate);
+               pm8001_get_lrate_mode(phy, link_rate);
                break;
        default:
                PM8001_MSG_DBG(pm8001_ha,
@@ -3179,7 +3433,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
                " phy id = %d\n", port_id, phy_id));
        port->port_state =  portstate;
        port->port_attached = 1;
-       get_lrate_mode(phy, link_rate);
+       pm8001_get_lrate_mode(phy, link_rate);
        phy->phy_type |= PORT_TYPE_SATA;
        phy->phy_attached = 1;
        phy->sas_phy.oob_mode = SATA_OOB_MODE;
@@ -3189,7 +3443,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
                sizeof(struct dev_to_host_fis));
        phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
        phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
-       phy->identify.device_type = SATA_DEV;
+       phy->identify.device_type = SAS_SATA_DEV;
        pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
        spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
        pm8001_bytes_dmaed(pm8001_ha, phy_id);
@@ -3260,7 +3514,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
 }
 
 /**
- * mpi_reg_resp -process register device ID response.
+ * pm8001_mpi_reg_resp - process register device ID response.
  * @pm8001_ha: our hba card information
  * @piomb: IO message buffer
  *
@@ -3269,7 +3523,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
  * has assigned, from now,inter-communication with FW is no longer using the
  * SAS address, use device ID which FW assigned.
  */
-static int mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
        u32 status;
        u32 device_id;
@@ -3331,7 +3585,7 @@ static int mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
        return 0;
 }
 
-static int mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
        u32 status;
        u32 device_id;
@@ -3347,8 +3601,13 @@ static int mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
        return 0;
 }
 
-static int
-mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+/**
+ * pm8001_mpi_fw_flash_update_resp - Response from FW for flash update command.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha,
+               void *piomb)
 {
        u32 status;
        struct fw_control_ex    fw_control_context;
@@ -3403,10 +3662,6 @@ mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
                break;
        }
        ccb->fw_control_context->fw_control->retcode = status;
-       pci_free_consistent(pm8001_ha->pdev,
-                       fw_control_context.len,
-                       fw_control_context.virtAddr,
-                       fw_control_context.phys_addr);
        complete(pm8001_ha->nvmd_completion);
        ccb->task = NULL;
        ccb->ccb_tag = 0xFFFFFFFF;
@@ -3414,8 +3669,7 @@ mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
        return 0;
 }
 
-static int
-mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
+int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
 {
        u32 status;
        int i;
@@ -3431,8 +3685,7 @@ mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
        return 0;
 }
 
-static int
-mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
        struct sas_task *t;
        struct pm8001_ccb_info *ccb;
@@ -3440,19 +3693,29 @@ mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
        u32 status ;
        u32 tag, scp;
        struct task_status_struct *ts;
+       struct pm8001_device *pm8001_dev;
 
        struct task_abort_resp *pPayload =
                (struct task_abort_resp *)(piomb + 4);
 
        status = le32_to_cpu(pPayload->status);
        tag = le32_to_cpu(pPayload->tag);
+       if (!tag) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk(" TAG NULL. RETURNING !!!"));
+               return -1;
+       }
+
        scp = le32_to_cpu(pPayload->scp);
        ccb = &pm8001_ha->ccb_info[tag];
        t = ccb->task;
-       PM8001_IO_DBG(pm8001_ha,
-               pm8001_printk(" status = 0x%x\n", status));
-       if (t == NULL)
+       pm8001_dev = ccb->device; /* retrieve device */
+
+       if (!t) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk(" TASK NULL. RETURNING !!!"));
                return -1;
+       }
        ts = &t->task_status;
        if (status != 0)
                PM8001_FAIL_DBG(pm8001_ha,
@@ -3476,7 +3739,15 @@ mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
        spin_unlock_irqrestore(&t->task_state_lock, flags);
        pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
        mb();
-       t->task_done(t);
+
+       if ((pm8001_dev->id & NCQ_ABORT_ALL_FLAG) && t) {
+               pm8001_tag_free(pm8001_ha, tag);
+               sas_free_task(t);
+               /* clear the flag */
+               pm8001_dev->id &= 0xBFFFFFFF;
+       } else
+               t->task_done(t);
+
        return 0;
 }
 
@@ -3727,17 +3998,17 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
        case OPC_OUB_LOCAL_PHY_CNTRL:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_LOCAL_PHY_CNTRL\n"));
-               mpi_local_phy_ctl(pm8001_ha, piomb);
+               pm8001_mpi_local_phy_ctl(pm8001_ha, piomb);
                break;
        case OPC_OUB_DEV_REGIST:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_DEV_REGIST\n"));
-               mpi_reg_resp(pm8001_ha, piomb);
+               pm8001_mpi_reg_resp(pm8001_ha, piomb);
                break;
        case OPC_OUB_DEREG_DEV:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("unregister the device\n"));
-               mpi_dereg_resp(pm8001_ha, piomb);
+               pm8001_mpi_dereg_resp(pm8001_ha, piomb);
                break;
        case OPC_OUB_GET_DEV_HANDLE:
                PM8001_MSG_DBG(pm8001_ha,
@@ -3775,7 +4046,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
        case OPC_OUB_FW_FLASH_UPDATE:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_FW_FLASH_UPDATE\n"));
-               mpi_fw_flash_update_resp(pm8001_ha, piomb);
+               pm8001_mpi_fw_flash_update_resp(pm8001_ha, piomb);
                break;
        case OPC_OUB_GPIO_RESPONSE:
                PM8001_MSG_DBG(pm8001_ha,
@@ -3788,17 +4059,17 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
        case OPC_OUB_GENERAL_EVENT:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_GENERAL_EVENT\n"));
-               mpi_general_event(pm8001_ha, piomb);
+               pm8001_mpi_general_event(pm8001_ha, piomb);
                break;
        case OPC_OUB_SSP_ABORT_RSP:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_SSP_ABORT_RSP\n"));
-               mpi_task_abort_resp(pm8001_ha, piomb);
+               pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
                break;
        case OPC_OUB_SATA_ABORT_RSP:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_SATA_ABORT_RSP\n"));
-               mpi_task_abort_resp(pm8001_ha, piomb);
+               pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
                break;
        case OPC_OUB_SAS_DIAG_MODE_START_END:
                PM8001_MSG_DBG(pm8001_ha,
@@ -3823,17 +4094,17 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
        case OPC_OUB_SMP_ABORT_RSP:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_SMP_ABORT_RSP\n"));
-               mpi_task_abort_resp(pm8001_ha, piomb);
+               pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
                break;
        case OPC_OUB_GET_NVMD_DATA:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_GET_NVMD_DATA\n"));
-               mpi_get_nvmd_resp(pm8001_ha, piomb);
+               pm8001_mpi_get_nvmd_resp(pm8001_ha, piomb);
                break;
        case OPC_OUB_SET_NVMD_DATA:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_SET_NVMD_DATA\n"));
-               mpi_set_nvmd_resp(pm8001_ha, piomb);
+               pm8001_mpi_set_nvmd_resp(pm8001_ha, piomb);
                break;
        case OPC_OUB_DEVICE_HANDLE_REMOVAL:
                PM8001_MSG_DBG(pm8001_ha,
@@ -3842,7 +4113,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
        case OPC_OUB_SET_DEVICE_STATE:
                PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("OPC_OUB_SET_DEVICE_STATE\n"));
-               mpi_set_dev_state_resp(pm8001_ha, piomb);
+               pm8001_mpi_set_dev_state_resp(pm8001_ha, piomb);
                break;
        case OPC_OUB_GET_DEVICE_STATE:
                PM8001_MSG_DBG(pm8001_ha,
@@ -3864,7 +4135,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
        }
 }
 
-static int process_oq(struct pm8001_hba_info *pm8001_ha)
+static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
 {
        struct outbound_queue_table *circularQ;
        void *pMsg1 = NULL;
@@ -3873,14 +4144,15 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha)
        unsigned long flags;
 
        spin_lock_irqsave(&pm8001_ha->lock, flags);
-       circularQ = &pm8001_ha->outbnd_q_tbl[0];
+       circularQ = &pm8001_ha->outbnd_q_tbl[vec];
        do {
-               ret = mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
+               ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
                if (MPI_IO_STATUS_SUCCESS == ret) {
                        /* process the outbound message */
                        process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4));
                        /* free the message from the outbound circular buffer */
-                       mpi_msg_free_set(pm8001_ha, pMsg1, circularQ, bc);
+                       pm8001_mpi_msg_free_set(pm8001_ha, pMsg1,
+                                                       circularQ, bc);
                }
                if (MPI_IO_STATUS_BUSY == ret) {
                        /* Update the producer index from SPC */
@@ -3903,7 +4175,7 @@ static const u8 data_dir_flags[] = {
        [PCI_DMA_FROMDEVICE]    = DATA_DIR_IN,/* INBOUND */
        [PCI_DMA_NONE]          = DATA_DIR_NONE,/* NO TRANSFER */
 };
-static void
+void
 pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd)
 {
        int i;
@@ -3978,7 +4250,7 @@ static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
        smp_cmd.long_smp_req.long_resp_size =
                cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
        build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, &smp_cmd);
-       mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd);
+       pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd, 0);
        return 0;
 
 err_out_2:
@@ -4042,7 +4314,7 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
                ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
                ssp_cmd.esgl = 0;
        }
-       ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd);
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, 0);
        return ret;
 }
 
@@ -4060,6 +4332,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
        u32 ATAP = 0x0;
        u32 dir;
        struct inbound_queue_table *circularQ;
+       unsigned long flags;
        u32  opc = OPC_INB_SATA_HOST_OPSTART;
        memset(&sata_cmd, 0, sizeof(sata_cmd));
        circularQ = &pm8001_ha->inbnd_q_tbl[0];
@@ -4080,8 +4353,10 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
                        PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n"));
                }
        }
-       if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag))
+       if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) {
+               task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
                ncg_tag = hdr_tag;
+       }
        dir = data_dir_flags[task->data_dir] << 8;
        sata_cmd.tag = cpu_to_le32(tag);
        sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
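The new task->ata_task.fis.sector_count |= (u8)(hdr_tag << 3) line reflects how FPDMA (NCQ) commands carry the queue tag in bits 7:3 of the FIS sector-count field. A tiny sketch of that packing:

#include <stdint.h>

/* The NCQ tag occupies bits 7:3 of the FPDMA FIS sector-count field. */
static inline uint8_t ncq_pack_tag(uint8_t sector_count, uint8_t tag)
{
        return (uint8_t)(sector_count | ((tag & 0x1f) << 3));
}

static inline uint8_t ncq_unpack_tag(uint8_t sector_count)
{
        return (sector_count >> 3) & 0x1f;
}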
@@ -4112,7 +4387,55 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
                sata_cmd.len = cpu_to_le32(task->total_xfer_len);
                sata_cmd.esgl = 0;
        }
-       ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd);
+
+       /* Check for read log for failed drive and return */
+       if (sata_cmd.sata_fis.command == 0x2f) {
+               if (pm8001_ha_dev && ((pm8001_ha_dev->id & NCQ_READ_LOG_FLAG) ||
+                       (pm8001_ha_dev->id & NCQ_ABORT_ALL_FLAG) ||
+                       (pm8001_ha_dev->id & NCQ_2ND_RLE_FLAG))) {
+                       struct task_status_struct *ts;
+
+                       pm8001_ha_dev->id &= 0xDFFFFFFF;
+                       ts = &task->task_status;
+
+                       spin_lock_irqsave(&task->task_state_lock, flags);
+                       ts->resp = SAS_TASK_COMPLETE;
+                       ts->stat = SAM_STAT_GOOD;
+                       task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+                       task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+                       task->task_state_flags |= SAS_TASK_STATE_DONE;
+                       if (unlikely((task->task_state_flags &
+                                       SAS_TASK_STATE_ABORTED))) {
+                               spin_unlock_irqrestore(&task->task_state_lock,
+                                                       flags);
+                               PM8001_FAIL_DBG(pm8001_ha,
+                                       pm8001_printk("task 0x%p resp 0x%x "
+                                       " stat 0x%x but aborted by upper layer "
+                                       "\n", task, ts->resp, ts->stat));
+                               pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+                       } else if (task->uldd_task) {
+                               spin_unlock_irqrestore(&task->task_state_lock,
+                                                       flags);
+                               pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+                               mb();/* ditto */
+                               spin_unlock_irq(&pm8001_ha->lock);
+                               task->task_done(task);
+                               spin_lock_irq(&pm8001_ha->lock);
+                               return 0;
+                       } else if (!task->uldd_task) {
+                               spin_unlock_irqrestore(&task->task_state_lock,
+                                                       flags);
+                               pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+                               mb();/*ditto*/
+                               spin_unlock_irq(&pm8001_ha->lock);
+                               task->task_done(task);
+                               spin_lock_irq(&pm8001_ha->lock);
+                               return 0;
+                       }
+               }
+       }
+
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
        return ret;
 }
 
@@ -4142,12 +4465,12 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
        payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
                LINKMODE_AUTO | LINKRATE_15 |
                LINKRATE_30 | LINKRATE_60 | phy_id);
-       payload.sas_identify.dev_type = SAS_END_DEV;
+       payload.sas_identify.dev_type = SAS_END_DEVICE;
        payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
        memcpy(payload.sas_identify.sas_addr,
                pm8001_ha->sas_addr, SAS_ADDR_SIZE);
        payload.sas_identify.phy_id = phy_id;
-       ret = mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload);
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
        return ret;
 }
 
@@ -4157,7 +4480,7 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
  * @num: the inbound queue number
  * @phy_id: the phy id which we wanted to start up.
  */
-static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
+int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
        u8 phy_id)
 {
        struct phy_stop_req payload;
@@ -4169,12 +4492,12 @@ static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
        memset(&payload, 0, sizeof(payload));
        payload.tag = cpu_to_le32(tag);
        payload.phy_id = cpu_to_le32(phy_id);
-       ret = mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload);
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
        return ret;
 }
 
 /**
- * see comments on mpi_reg_resp.
+ * see comments on pm8001_mpi_reg_resp.
  */
 static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
        struct pm8001_device *pm8001_dev, u32 flag)
@@ -4204,11 +4527,11 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
        if (flag == 1)
                stp_sspsmp_sata = 0x02; /*direct attached sata */
        else {
-               if (pm8001_dev->dev_type == SATA_DEV)
+               if (pm8001_dev->dev_type == SAS_SATA_DEV)
                        stp_sspsmp_sata = 0x00; /* stp*/
-               else if (pm8001_dev->dev_type == SAS_END_DEV ||
-                       pm8001_dev->dev_type == EDGE_DEV ||
-                       pm8001_dev->dev_type == FANOUT_DEV)
+               else if (pm8001_dev->dev_type == SAS_END_DEVICE ||
+                       pm8001_dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+                       pm8001_dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
                        stp_sspsmp_sata = 0x01; /*ssp or smp*/
        }
        if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
@@ -4228,14 +4551,14 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
                cpu_to_le32(ITNT | (firstBurstSize * 0x10000));
        memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
                SAS_ADDR_SIZE);
-       rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+       rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
        return rc;
 }
 
 /**
- * see comments on mpi_reg_resp.
+ * see comments on pm8001_mpi_reg_resp.
  */
-static int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
+int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
        u32 device_id)
 {
        struct dereg_dev_req payload;
@@ -4249,7 +4572,7 @@ static int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
        payload.device_id = cpu_to_le32(device_id);
        PM8001_MSG_DBG(pm8001_ha,
                pm8001_printk("unregister device device_id = %d\n", device_id));
-       ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
        return ret;
 }
 
@@ -4272,7 +4595,7 @@ static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
        payload.tag = cpu_to_le32(1);
        payload.phyop_phyid =
                cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F));
-       ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
        return ret;
 }
 
@@ -4296,11 +4619,11 @@ static u32 pm8001_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha)
  * @stat: stat.
  */
 static irqreturn_t
-pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha)
+pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec)
 {
-       pm8001_chip_interrupt_disable(pm8001_ha);
-       process_oq(pm8001_ha);
-       pm8001_chip_interrupt_enable(pm8001_ha);
+       pm8001_chip_interrupt_disable(pm8001_ha, vec);
+       process_oq(pm8001_ha, vec);
+       pm8001_chip_interrupt_enable(pm8001_ha, vec);
        return IRQ_HANDLED;
 }
 
@@ -4322,7 +4645,7 @@ static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc,
                task_abort.device_id = cpu_to_le32(dev_id);
                task_abort.tag = cpu_to_le32(cmd_tag);
        }
-       ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort);
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
        return ret;
 }
 
@@ -4331,16 +4654,17 @@ static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc,
  * @task: the task we wanted to aborted.
  * @flag: the abort flag.
  */
-static int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
+int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
        struct pm8001_device *pm8001_dev, u8 flag, u32 task_tag, u32 cmd_tag)
 {
        u32 opc, device_id;
        int rc = TMF_RESP_FUNC_FAILED;
-       PM8001_EH_DBG(pm8001_ha, pm8001_printk("cmd_tag = %x, abort task tag"
-               " = %x", cmd_tag, task_tag));
-       if (pm8001_dev->dev_type == SAS_END_DEV)
+       PM8001_EH_DBG(pm8001_ha,
+               pm8001_printk("cmd_tag = %x, abort task tag = 0x%x",
+                       cmd_tag, task_tag));
+       if (pm8001_dev->dev_type == SAS_END_DEVICE)
                opc = OPC_INB_SSP_ABORT;
-       else if (pm8001_dev->dev_type == SATA_DEV)
+       else if (pm8001_dev->dev_type == SAS_SATA_DEV)
                opc = OPC_INB_SATA_ABORT;
        else
                opc = OPC_INB_SMP_ABORT;/* SMP */
@@ -4358,7 +4682,7 @@ static int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
  * @ccb: the ccb information.
  * @tmf: task management function.
  */
-static int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
+int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
        struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf)
 {
        struct sas_task *task = ccb->task;
@@ -4376,11 +4700,11 @@ static int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
        memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8);
        sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag);
        circularQ = &pm8001_ha->inbnd_q_tbl[0];
-       ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd);
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd, 0);
        return ret;
 }
 
-static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
+int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
        void *payload)
 {
        u32 opc = OPC_INB_GET_NVMD_DATA;
@@ -4397,7 +4721,7 @@ static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
        fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
        if (!fw_control_context)
                return -ENOMEM;
-       fw_control_context->usrAddr = (u8 *)&ioctl_payload->func_specific[0];
+       fw_control_context->usrAddr = (u8 *)ioctl_payload->func_specific;
        fw_control_context->len = ioctl_payload->length;
        circularQ = &pm8001_ha->inbnd_q_tbl[0];
        memset(&nvmd_req, 0, sizeof(nvmd_req));
@@ -4456,11 +4780,11 @@ static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
        default:
                break;
        }
-       rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req);
+       rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0);
        return rc;
 }
 
-static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
+int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
        void *payload)
 {
        u32 opc = OPC_INB_SET_NVMD_DATA;
@@ -4479,7 +4803,7 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
                return -ENOMEM;
        circularQ = &pm8001_ha->inbnd_q_tbl[0];
        memcpy(pm8001_ha->memoryMap.region[NVMD].virt_ptr,
-               ioctl_payload->func_specific,
+               &ioctl_payload->func_specific,
                ioctl_payload->length);
        memset(&nvmd_req, 0, sizeof(nvmd_req));
        rc = pm8001_tag_alloc(pm8001_ha, &tag);
@@ -4536,7 +4860,7 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
        default:
                break;
        }
-       rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req);
+       rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0);
        return rc;
 }
 
@@ -4545,7 +4869,7 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
  * @pm8001_ha: our hba card information.
  * @fw_flash_updata_info: firmware flash update param
  */
-static int
+int
 pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
        void *fw_flash_updata_info, u32 tag)
 {
@@ -4567,11 +4891,11 @@ pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
                cpu_to_le32(lower_32_bits(le64_to_cpu(info->sgl.addr)));
        payload.sgl_addr_hi =
                cpu_to_le32(upper_32_bits(le64_to_cpu(info->sgl.addr)));
-       ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
        return ret;
 }
 
-static int
+int
 pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
        void *payload)
 {
@@ -4581,29 +4905,14 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
        int rc;
        u32 tag;
        struct pm8001_ccb_info *ccb;
-       void *buffer = NULL;
-       dma_addr_t phys_addr;
-       u32 phys_addr_hi;
-       u32 phys_addr_lo;
+       void *buffer = pm8001_ha->memoryMap.region[FW_FLASH].virt_ptr;
+       dma_addr_t phys_addr = pm8001_ha->memoryMap.region[FW_FLASH].phys_addr;
        struct pm8001_ioctl_payload *ioctl_payload = payload;
 
        fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL);
        if (!fw_control_context)
                return -ENOMEM;
-       fw_control = (struct fw_control_info *)&ioctl_payload->func_specific[0];
-       if (fw_control->len != 0) {
-               if (pm8001_mem_alloc(pm8001_ha->pdev,
-                       (void **)&buffer,
-                       &phys_addr,
-                       &phys_addr_hi,
-                       &phys_addr_lo,
-                       fw_control->len, 0) != 0) {
-                               PM8001_FAIL_DBG(pm8001_ha,
-                                       pm8001_printk("Mem alloc failure\n"));
-                               kfree(fw_control_context);
-                               return -ENOMEM;
-               }
-       }
+       fw_control = (struct fw_control_info *)&ioctl_payload->func_specific;
        memcpy(buffer, fw_control->buffer, fw_control->len);
        flash_update_info.sgl.addr = cpu_to_le64(phys_addr);
        flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len);
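With this rework the firmware image is staged in the pre-allocated FW_FLASH memory region instead of a per-request pci_alloc_consistent() buffer, and the SGL in the flash-update IOMB simply carries that region's fixed DMA address. A minimal sketch of the staging step with simplified stand-in types (the real driver records the same values in fw_control_context):

#include <stdint.h>
#include <string.h>

struct dma_region {            /* stand-in for memoryMap.region[FW_FLASH] */
        void     *virt_ptr;
        uint64_t  phys_addr;
};

struct flash_sgl {             /* stand-in for the IOMB's SGL descriptor  */
        uint64_t addr;
        uint32_t len;
};

/* Copy the caller's image into the pre-allocated region and point the
 * SGL at its bus address; no per-request DMA allocation is needed. */
static void stage_fw_image(struct dma_region *fw_flash,
                           const void *image, uint32_t len,
                           struct flash_sgl *sgl)
{
        memcpy(fw_flash->virt_ptr, image, len);
        sgl->addr = fw_flash->phys_addr;
        sgl->len  = len;
}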
@@ -4613,6 +4922,7 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
        flash_update_info.total_image_len = fw_control->size;
        fw_control_context->fw_control = fw_control;
        fw_control_context->virtAddr = buffer;
+       fw_control_context->phys_addr = phys_addr;
        fw_control_context->len = fw_control->len;
        rc = pm8001_tag_alloc(pm8001_ha, &tag);
        if (rc) {
@@ -4627,7 +4937,7 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
        return rc;
 }
 
-static int
+int
 pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
        struct pm8001_device *pm8001_dev, u32 state)
 {
@@ -4648,7 +4958,7 @@ pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
        payload.tag = cpu_to_le32(tag);
        payload.device_id = cpu_to_le32(pm8001_dev->device_id);
        payload.nds = cpu_to_le32(state);
-       rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+       rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
        return rc;
 
 }
@@ -4673,7 +4983,7 @@ pm8001_chip_sas_re_initialization(struct pm8001_hba_info *pm8001_ha)
        payload.SSAHOLT = cpu_to_le32(0xd << 25);
        payload.sata_hol_tmo = cpu_to_le32(80);
        payload.open_reject_cmdretries_data_retries = cpu_to_le32(0xff00ff);
-       rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
+       rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
        return rc;
 
 }
@@ -4706,4 +5016,3 @@ const struct pm8001_dispatch pm8001_8001_dispatch = {
        .set_dev_state_req      = pm8001_chip_set_dev_state_req,
        .sas_re_init_req        = pm8001_chip_sas_re_initialization,
 };
-
index d437309cb1e1c30de4f75aaafc7fb8dd0884a724..d7c1e2034226ea7667201a2a1474400f52994b5d 100644
 #define LINKRATE_30                    (0x02 << 8)
 #define LINKRATE_60                    (0x04 << 8)
 
+/* for new SPC controllers MEMBASE III is shared between BIOS and DATA */
+#define GSM_SM_BASE                    0x4F0000
 struct mpi_msg_hdr{
        __le32  header; /* Bits [11:0]  - Message operation code */
        /* Bits [15:12] - Message Category */
@@ -298,7 +300,7 @@ struct local_phy_ctl_resp {
 
 
 #define OP_BITS 0x0000FF00
-#define ID_BITS 0x0000000F
+#define ID_BITS 0x000000FF
 
 /*
  * brief the data structure of PORT Control Command
index 3d5e522e00fc8ae93a4c9344731f960fa42c0006..e4b9bc7f5410fa5e38a9df530cba183652f1962a 100644
@@ -1,5 +1,5 @@
 /*
- * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
+ * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
  *
  * Copyright (c) 2008-2009 USI Co., Ltd.
  * All rights reserved.
 
 static struct scsi_transport_template *pm8001_stt;
 
+/**
+ * Chip info structure identifying key chip functionality: whether
+ * encryption is available, the number of ports and the HW-specific dispatch.
+ */
 static const struct pm8001_chip_info pm8001_chips[] = {
-       [chip_8001] = {  8, &pm8001_8001_dispatch,},
+       [chip_8001] = {0,  8, &pm8001_8001_dispatch,},
+       [chip_8008] = {0,  8, &pm8001_80xx_dispatch,},
+       [chip_8009] = {1,  8, &pm8001_80xx_dispatch,},
+       [chip_8018] = {0,  16, &pm8001_80xx_dispatch,},
+       [chip_8019] = {1,  16, &pm8001_80xx_dispatch,},
 };
 static int pm8001_id;
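The expanded pm8001_chips[] table adds an encryption-capability flag and a per-chip phy count in front of the dispatch pointer. A hedged sketch of the shape those initializers imply; the struct definition is not shown in this hunk, so the field names below are assumptions:

#include <stddef.h>

struct dispatch;                       /* HW-specific function table (opaque here) */

struct chip_info {                     /* assumed layout matching the initializers */
        unsigned int encrypt;          /* 1 if the part supports encryption        */
        unsigned int n_phy;            /* number of phys: 8 or 16                  */
        const struct dispatch *dispatch;
};

/* Mirrors the ordering seen in pm8001_chips[]; dispatch pointers elided. */
static const struct chip_info example_chips[] = {
        { 0,  8, NULL },               /* chip_8001: no encryption,  8 phys */
        { 1,  8, NULL },               /* chip_8009: encryption,     8 phys */
        { 1, 16, NULL },               /* chip_8019: encryption,    16 phys */
};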
 
@@ -155,37 +163,75 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
 }
 
 #ifdef PM8001_USE_TASKLET
+
+/**
+ * pm8001_tasklet - tasklet for the 64 MSI-X interrupt handlers
+ * @opaque: the passed general host adapter struct
+ * Note: pm8001_tasklet is common for pm8001 & pm80xx
+ */
 static void pm8001_tasklet(unsigned long opaque)
 {
        struct pm8001_hba_info *pm8001_ha;
+       u32 vec;
        pm8001_ha = (struct pm8001_hba_info *)opaque;
        if (unlikely(!pm8001_ha))
                BUG_ON(1);
-       PM8001_CHIP_DISP->isr(pm8001_ha);
+       vec = pm8001_ha->int_vector;
+       PM8001_CHIP_DISP->isr(pm8001_ha, vec);
+}
+#endif
+
+static struct  pm8001_hba_info *outq_to_hba(u8 *outq)
+{
+       return container_of((outq - *outq), struct pm8001_hba_info, outq[0]);
 }
+
+/**
+ * pm8001_interrupt_handler_msix - main MSIX interrupt handler.
+ * It obtains the vector number and either schedules the matching bottom
+ * half or services the interrupt directly.
+ * @opaque: the passed outbound queue/vector; the host structure is
+ * retrieved from it.
+ */
+static irqreturn_t pm8001_interrupt_handler_msix(int irq, void *opaque)
+{
+       struct pm8001_hba_info *pm8001_ha = outq_to_hba(opaque);
+       u8 outq = *(u8 *)opaque;
+       irqreturn_t ret = IRQ_HANDLED;
+       if (unlikely(!pm8001_ha))
+               return IRQ_NONE;
+       if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))
+               return IRQ_NONE;
+       pm8001_ha->int_vector = outq;
+#ifdef PM8001_USE_TASKLET
+       tasklet_schedule(&pm8001_ha->tasklet);
+#else
+       ret = PM8001_CHIP_DISP->isr(pm8001_ha, outq);
 #endif
+       return ret;
+}
 
+/**
+ * pm8001_interrupt_handler_intx - main INTx interrupt handler.
+ * @dev_id: sas_ha structure. The HBA is retrieved from the sas_ha structure.
+ */
 
- /**
-  * pm8001_interrupt - when HBA originate a interrupt,we should invoke this
-  * dispatcher to handle each case.
-  * @irq: irq number.
-  * @opaque: the passed general host adapter struct
-  */
-static irqreturn_t pm8001_interrupt(int irq, void *opaque)
+static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id)
 {
        struct pm8001_hba_info *pm8001_ha;
        irqreturn_t ret = IRQ_HANDLED;
-       struct sas_ha_struct *sha = opaque;
+       struct sas_ha_struct *sha = dev_id;
        pm8001_ha = sha->lldd_ha;
        if (unlikely(!pm8001_ha))
                return IRQ_NONE;
        if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))
                return IRQ_NONE;
+
+       pm8001_ha->int_vector = 0;
 #ifdef PM8001_USE_TASKLET
        tasklet_schedule(&pm8001_ha->tasklet);
 #else
-       ret = PM8001_CHIP_DISP->isr(pm8001_ha);
+       ret = PM8001_CHIP_DISP->isr(pm8001_ha, 0);
 #endif
        return ret;
 }
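outq_to_hba() above depends on each entry of the per-HBA outq[] array holding its own index: given a pointer to outq[i], subtracting the stored value *outq (which equals i) yields &outq[0], and container_of() then recovers the enclosing pm8001_hba_info. A standalone sketch of the same pattern with a simplified structure and a minimal container_of:

#include <stddef.h>
#include <stdio.h>

#define MAX_VECTORS 4

struct hba {
        int id;
        unsigned char outq[MAX_VECTORS];   /* outq[i] == i, filled at init time */
};

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Recover the owning hba from a pointer to one of its outq[] slots. */
static struct hba *slot_to_hba(unsigned char *outq)
{
        return container_of(outq - *outq, struct hba, outq);
}

int main(void)
{
        struct hba h = { .id = 7 };
        for (unsigned char i = 0; i < MAX_VECTORS; i++)
                h.outq[i] = i;                       /* each slot stores its index */

        printf("%d\n", slot_to_hba(&h.outq[2])->id); /* prints 7 */
        return 0;
}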
@@ -195,10 +241,14 @@ static irqreturn_t pm8001_interrupt(int irq, void *opaque)
  * @pm8001_ha:our hba structure.
  *
  */
-static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
+static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
+                       const struct pci_device_id *ent)
 {
        int i;
        spin_lock_init(&pm8001_ha->lock);
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("pm8001_alloc: PHY:%x\n",
+                               pm8001_ha->chip->n_phy));
        for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
                pm8001_phy_init(pm8001_ha, i);
                pm8001_ha->port[i].wide_port_phymap = 0;
@@ -222,30 +272,57 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
        pm8001_ha->memoryMap.region[IOP].total_len = PM8001_EVENT_LOG_SIZE;
        pm8001_ha->memoryMap.region[IOP].alignment = 32;
 
-       /* MPI Memory region 3 for consumer Index of inbound queues */
-       pm8001_ha->memoryMap.region[CI].num_elements = 1;
-       pm8001_ha->memoryMap.region[CI].element_size = 4;
-       pm8001_ha->memoryMap.region[CI].total_len = 4;
-       pm8001_ha->memoryMap.region[CI].alignment = 4;
-
-       /* MPI Memory region 4 for producer Index of outbound queues */
-       pm8001_ha->memoryMap.region[PI].num_elements = 1;
-       pm8001_ha->memoryMap.region[PI].element_size = 4;
-       pm8001_ha->memoryMap.region[PI].total_len = 4;
-       pm8001_ha->memoryMap.region[PI].alignment = 4;
-
-       /* MPI Memory region 5 inbound queues */
-       pm8001_ha->memoryMap.region[IB].num_elements = PM8001_MPI_QUEUE;
-       pm8001_ha->memoryMap.region[IB].element_size = 64;
-       pm8001_ha->memoryMap.region[IB].total_len = PM8001_MPI_QUEUE * 64;
-       pm8001_ha->memoryMap.region[IB].alignment = 64;
-
-       /* MPI Memory region 6 outbound queues */
-       pm8001_ha->memoryMap.region[OB].num_elements = PM8001_MPI_QUEUE;
-       pm8001_ha->memoryMap.region[OB].element_size = 64;
-       pm8001_ha->memoryMap.region[OB].total_len = PM8001_MPI_QUEUE * 64;
-       pm8001_ha->memoryMap.region[OB].alignment = 64;
+       for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) {
+               /* MPI Memory region 3 for consumer Index of inbound queues */
+               pm8001_ha->memoryMap.region[CI+i].num_elements = 1;
+               pm8001_ha->memoryMap.region[CI+i].element_size = 4;
+               pm8001_ha->memoryMap.region[CI+i].total_len = 4;
+               pm8001_ha->memoryMap.region[CI+i].alignment = 4;
+
+               if ((ent->driver_data) != chip_8001) {
+                       /* MPI Memory region 5 inbound queues */
+                       pm8001_ha->memoryMap.region[IB+i].num_elements =
+                                               PM8001_MPI_QUEUE;
+                       pm8001_ha->memoryMap.region[IB+i].element_size = 128;
+                       pm8001_ha->memoryMap.region[IB+i].total_len =
+                                               PM8001_MPI_QUEUE * 128;
+                       pm8001_ha->memoryMap.region[IB+i].alignment = 128;
+               } else {
+                       pm8001_ha->memoryMap.region[IB+i].num_elements =
+                                               PM8001_MPI_QUEUE;
+                       pm8001_ha->memoryMap.region[IB+i].element_size = 64;
+                       pm8001_ha->memoryMap.region[IB+i].total_len =
+                                               PM8001_MPI_QUEUE * 64;
+                       pm8001_ha->memoryMap.region[IB+i].alignment = 64;
+               }
+       }
+
+       for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
+               /* MPI Memory region 4 for producer Index of outbound queues */
+               pm8001_ha->memoryMap.region[PI+i].num_elements = 1;
+               pm8001_ha->memoryMap.region[PI+i].element_size = 4;
+               pm8001_ha->memoryMap.region[PI+i].total_len = 4;
+               pm8001_ha->memoryMap.region[PI+i].alignment = 4;
+
+               if (ent->driver_data != chip_8001) {
+                       /* MPI Memory region 6 Outbound queues */
+                       pm8001_ha->memoryMap.region[OB+i].num_elements =
+                                               PM8001_MPI_QUEUE;
+                       pm8001_ha->memoryMap.region[OB+i].element_size = 128;
+                       pm8001_ha->memoryMap.region[OB+i].total_len =
+                                               PM8001_MPI_QUEUE * 128;
+                       pm8001_ha->memoryMap.region[OB+i].alignment = 128;
+               } else {
+                       /* MPI Memory region 6 Outbound queues */
+                       pm8001_ha->memoryMap.region[OB+i].num_elements =
+                                               PM8001_MPI_QUEUE;
+                       pm8001_ha->memoryMap.region[OB+i].element_size = 64;
+                       pm8001_ha->memoryMap.region[OB+i].total_len =
+                                               PM8001_MPI_QUEUE * 64;
+                       pm8001_ha->memoryMap.region[OB+i].alignment = 64;
+               }
 
+       }
        /* Memory region write DMA*/
        pm8001_ha->memoryMap.region[NVMD].num_elements = 1;
        pm8001_ha->memoryMap.region[NVMD].element_size = 4096;
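The loops above allocate one 4-byte consumer-index (inbound) or producer-index (outbound) word per queue and size each ring as PM8001_MPI_QUEUE elements of 64 bytes on the original 8001 (SPC) or 128 bytes on the newer parts. A small sketch of the per-queue footprint this implies; the queue depth below is illustrative:

#include <stdio.h>

#define MPI_QUEUE_DEPTH 1024            /* stands in for PM8001_MPI_QUEUE */

/* Bytes for one queue: a 4-byte index word (CI or PI) plus the ring. */
static unsigned long queue_bytes(unsigned int element_size)
{
        return 4UL + (unsigned long)MPI_QUEUE_DEPTH * element_size;
}

int main(void)
{
        printf("SPC   (64-byte IOMB): %lu bytes per queue\n", queue_bytes(64));
        printf("SPCv (128-byte IOMB): %lu bytes per queue\n", queue_bytes(128));
        return 0;
}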
@@ -264,6 +341,9 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
        pm8001_ha->memoryMap.region[CCB_MEM].total_len = PM8001_MAX_CCB *
                sizeof(struct pm8001_ccb_info);
 
+       /* Memory region for fw flash */
+       pm8001_ha->memoryMap.region[FW_FLASH].total_len = 4096;
+
        for (i = 0; i < USI_MAX_MEMCNT; i++) {
                if (pm8001_mem_alloc(pm8001_ha->pdev,
                        &pm8001_ha->memoryMap.region[i].virt_ptr,
@@ -281,7 +361,7 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
 
        pm8001_ha->devices = pm8001_ha->memoryMap.region[DEV_MEM].virt_ptr;
        for (i = 0; i < PM8001_MAX_DEVICES; i++) {
-               pm8001_ha->devices[i].dev_type = NO_DEVICE;
+               pm8001_ha->devices[i].dev_type = SAS_PHY_UNUSED;
                pm8001_ha->devices[i].id = i;
                pm8001_ha->devices[i].device_id = PM8001_MAX_DEVICES;
                pm8001_ha->devices[i].running_req = 0;
@@ -339,10 +419,12 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
                                ioremap(pm8001_ha->io_mem[logicalBar].membase,
                                pm8001_ha->io_mem[logicalBar].memsize);
                        PM8001_INIT_DBG(pm8001_ha,
-                               pm8001_printk("PCI: bar %d, logicalBar %d "
-                               "virt_addr=%lx,len=%d\n", bar, logicalBar,
-                               (unsigned long)
-                               pm8001_ha->io_mem[logicalBar].memvirtaddr,
+                               pm8001_printk("PCI: bar %d, logicalBar %d ",
+                               bar, logicalBar));
+                       PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+                               "base addr %llx virt_addr=%llx len=%d\n",
+                               (u64)pm8001_ha->io_mem[logicalBar].membase,
+                               (u64)pm8001_ha->io_mem[logicalBar].memvirtaddr,
                                pm8001_ha->io_mem[logicalBar].memsize));
                } else {
                        pm8001_ha->io_mem[logicalBar].membase   = 0;
@@ -361,8 +443,9 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
  * @shost: scsi host struct which has been initialized before.
  */
 static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
-                                               u32 chip_id,
-                                               struct Scsi_Host *shost)
+                                const struct pci_device_id *ent,
+                               struct Scsi_Host *shost)
+
 {
        struct pm8001_hba_info *pm8001_ha;
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
@@ -374,7 +457,7 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
 
        pm8001_ha->pdev = pdev;
        pm8001_ha->dev = &pdev->dev;
-       pm8001_ha->chip_id = chip_id;
+       pm8001_ha->chip_id = ent->driver_data;
        pm8001_ha->chip = &pm8001_chips[pm8001_ha->chip_id];
        pm8001_ha->irq = pdev->irq;
        pm8001_ha->sas = sha;
@@ -382,12 +465,22 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
        pm8001_ha->id = pm8001_id++;
        pm8001_ha->logging_level = 0x01;
        sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id);
+       /* IOMB size is 128 for 8088/89 controllers */
+       if (pm8001_ha->chip_id != chip_8001)
+               pm8001_ha->iomb_size = IOMB_SIZE_SPCV;
+       else
+               pm8001_ha->iomb_size = IOMB_SIZE_SPC;
+
 #ifdef PM8001_USE_TASKLET
+       /*
+        * default tasklet for non msi-x interrupt handler/first msi-x
+        * interrupt handler
+        */
        tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet,
-               (unsigned long)pm8001_ha);
+                       (unsigned long)pm8001_ha);
 #endif
        pm8001_ioremap(pm8001_ha);
-       if (!pm8001_alloc(pm8001_ha))
+       if (!pm8001_alloc(pm8001_ha, ent))
                return pm8001_ha;
        pm8001_free(pm8001_ha);
        return NULL;
@@ -512,21 +605,50 @@ static void  pm8001_post_sas_ha_init(struct Scsi_Host *shost,
  */
 static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
 {
-       u8 i;
+       u8 i, j;
 #ifdef PM8001_READ_VPD
+       /* For new SPC controllers the WWN is stored in flash vpd,
+        * for SPC/SPCve controllers it is stored in EEPROM and
+        * for older SPC controllers it is stored in NVMD.
+        */
        DECLARE_COMPLETION_ONSTACK(completion);
        struct pm8001_ioctl_payload payload;
+       u16 deviceid;
+       pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
        pm8001_ha->nvmd_completion = &completion;
-       payload.minor_function = 0;
-       payload.length = 128;
-       payload.func_specific = kzalloc(128, GFP_KERNEL);
+
+       if (pm8001_ha->chip_id == chip_8001) {
+               if (deviceid == 0x8081) {
+                       payload.minor_function = 4;
+                       payload.length = 4096;
+               } else {
+                       payload.minor_function = 0;
+                       payload.length = 128;
+               }
+       } else {
+               payload.minor_function = 1;
+               payload.length = 4096;
+       }
+       payload.offset = 0;
+       payload.func_specific = kzalloc(payload.length, GFP_KERNEL);
        PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
        wait_for_completion(&completion);
+
+       for (i = 0, j = 0; i <= 7; i++, j++) {
+               if (pm8001_ha->chip_id == chip_8001) {
+                       if (deviceid == 0x8081)
+                               pm8001_ha->sas_addr[j] =
+                                       payload.func_specific[0x704 + i];
+               } else
+                       pm8001_ha->sas_addr[j] =
+                                       payload.func_specific[0x804 + i];
+       }
+
        for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
-               memcpy(&pm8001_ha->phy[i].dev_sas_addr, pm8001_ha->sas_addr,
-                       SAS_ADDR_SIZE);
+               memcpy(&pm8001_ha->phy[i].dev_sas_addr,
+                       pm8001_ha->sas_addr, SAS_ADDR_SIZE);
                PM8001_INIT_DBG(pm8001_ha,
-                       pm8001_printk("phy %d sas_addr = %016llx \n", i,
+                       pm8001_printk("phy %d sas_addr = %016llx\n", i,
                        pm8001_ha->phy[i].dev_sas_addr));
        }
 #else
@@ -547,31 +669,50 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
  * @chip_info: our ha struct.
  * @irq_handler: irq_handler
  */
-static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha,
-       irq_handler_t irq_handler)
+static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
 {
        u32 i = 0, j = 0;
-       u32 number_of_intr = 1;
+       u32 number_of_intr;
        int flag = 0;
        u32 max_entry;
        int rc;
+       static char intr_drvname[PM8001_MAX_MSIX_VEC][sizeof(DRV_NAME)+3];
+
+       /* SPCv controllers support 64 MSI-X vectors */
+       if (pm8001_ha->chip_id == chip_8001) {
+               number_of_intr = 1;
+               flag |= IRQF_DISABLED;
+       } else {
+               number_of_intr = PM8001_MAX_MSIX_VEC;
+               flag &= ~IRQF_SHARED;
+               flag |= IRQF_DISABLED;
+       }
+
        max_entry = sizeof(pm8001_ha->msix_entries) /
                sizeof(pm8001_ha->msix_entries[0]);
-       flag |= IRQF_DISABLED;
        for (i = 0; i < max_entry ; i++)
                pm8001_ha->msix_entries[i].entry = i;
        rc = pci_enable_msix(pm8001_ha->pdev, pm8001_ha->msix_entries,
                number_of_intr);
        pm8001_ha->number_of_intr = number_of_intr;
        if (!rc) {
+               PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+                       "pci_enable_msix request ret:%d no of intr %d\n",
+                                       rc, pm8001_ha->number_of_intr));
+
+               for (i = 0; i < number_of_intr; i++)
+                       pm8001_ha->outq[i] = i;
+
                for (i = 0; i < number_of_intr; i++) {
+                       snprintf(intr_drvname[i], sizeof(intr_drvname[0]),
+                                       DRV_NAME"%d", i);
                        if (request_irq(pm8001_ha->msix_entries[i].vector,
-                               irq_handler, flag, DRV_NAME,
-                               SHOST_TO_SAS_HA(pm8001_ha->shost))) {
+                               pm8001_interrupt_handler_msix, flag,
+                               intr_drvname[i], &pm8001_ha->outq[i])) {
                                for (j = 0; j < i; j++)
                                        free_irq(
                                        pm8001_ha->msix_entries[j].vector,
-                                       SHOST_TO_SAS_HA(pm8001_ha->shost));
+                                       &pm8001_ha->outq[j]);
                                pci_disable_msix(pm8001_ha->pdev);
                                break;
                        }
@@ -588,22 +729,24 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha,
 static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha)
 {
        struct pci_dev *pdev;
-       irq_handler_t irq_handler = pm8001_interrupt;
        int rc;
 
        pdev = pm8001_ha->pdev;
 
 #ifdef PM8001_USE_MSIX
        if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
-               return pm8001_setup_msix(pm8001_ha, irq_handler);
-       else
+               return pm8001_setup_msix(pm8001_ha);
+       else {
+               PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("MSIX not supported!!!\n"));
                goto intx;
+       }
 #endif
 
 intx:
        /* initialize the INT-X interrupt */
-       rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME,
-               SHOST_TO_SAS_HA(pm8001_ha->shost));
+       rc = request_irq(pdev->irq, pm8001_interrupt_handler_intx, IRQF_SHARED,
+               DRV_NAME, SHOST_TO_SAS_HA(pm8001_ha->shost));
        return rc;
 }
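
To make the per-vector plumbing in the MSI-X hunk above easier to follow, here is a minimal user-space sketch (not part of the patch; names such as outq, MAX_VEC and handler are illustrative only) of why each request_irq() call is given its own cookie: the shared handler can then tell which outbound queue the firing vector maps to.

    #include <stdio.h>

    #define MAX_VEC 4                       /* stand-in for PM8001_MAX_MSIX_VEC */

    static unsigned char outq[MAX_VEC];     /* one cookie per MSI-X vector */

    /* stand-in interrupt handler: the cookie names the queue to service */
    static void handler(void *cookie)
    {
            unsigned char q = *(unsigned char *)cookie;
            printf("servicing outbound queue %u\n", q);
    }

    int main(void)
    {
            int i;

            for (i = 0; i < MAX_VEC; i++)
                    outq[i] = (unsigned char)i;     /* vector i -> queue i */

            handler(&outq[2]);                      /* simulate vector 2 firing */
            return 0;
    }
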
 
@@ -621,12 +764,13 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
 {
        unsigned int rc;
        u32     pci_reg;
+       u8      i = 0;
        struct pm8001_hba_info *pm8001_ha;
        struct Scsi_Host *shost = NULL;
        const struct pm8001_chip_info *chip;
 
        dev_printk(KERN_INFO, &pdev->dev,
-               "pm8001: driver version %s\n", DRV_VERSION);
+               "pm80xx: driver version %s\n", DRV_VERSION);
        rc = pci_enable_device(pdev);
        if (rc)
                goto err_out_enable;
@@ -665,25 +809,39 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
                goto err_out_free;
        }
        pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost));
-       pm8001_ha = pm8001_pci_alloc(pdev, chip_8001, shost);
+       /* ent->driver_data is used to differentiate between controllers */
+       pm8001_ha = pm8001_pci_alloc(pdev, ent, shost);
        if (!pm8001_ha) {
                rc = -ENOMEM;
                goto err_out_free;
        }
        list_add_tail(&pm8001_ha->list, &hba_list);
-       PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
+       PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
        rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
-       if (rc)
+       if (rc) {
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+                       "chip_init failed [ret: %d]\n", rc));
                goto err_out_ha_free;
+       }
 
        rc = scsi_add_host(shost, &pdev->dev);
        if (rc)
                goto err_out_ha_free;
        rc = pm8001_request_irq(pm8001_ha);
-       if (rc)
+       if (rc) {
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+                       "pm8001_request_irq failed [ret: %d]\n", rc));
                goto err_out_shost;
+       }
+
+       PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0);
+       if (pm8001_ha->chip_id != chip_8001) {
+               for (i = 1; i < pm8001_ha->number_of_intr; i++)
+                       PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i);
+               /* setup thermal configuration. */
+               pm80xx_set_thermal_config(pm8001_ha);
+       }
 
-       PM8001_CHIP_DISP->interrupt_enable(pm8001_ha);
        pm8001_init_sas_add(pm8001_ha);
        pm8001_post_sas_ha_init(shost, chip);
        rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
@@ -719,14 +877,15 @@ static void pm8001_pci_remove(struct pci_dev *pdev)
        sas_remove_host(pm8001_ha->shost);
        list_del(&pm8001_ha->list);
        scsi_remove_host(pm8001_ha->shost);
-       PM8001_CHIP_DISP->interrupt_disable(pm8001_ha);
-       PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
+       PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
+       PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
 
 #ifdef PM8001_USE_MSIX
        for (i = 0; i < pm8001_ha->number_of_intr; i++)
                synchronize_irq(pm8001_ha->msix_entries[i].vector);
        for (i = 0; i < pm8001_ha->number_of_intr; i++)
-               free_irq(pm8001_ha->msix_entries[i].vector, sha);
+               free_irq(pm8001_ha->msix_entries[i].vector,
+                               &pm8001_ha->outq[i]);
        pci_disable_msix(pdev);
 #else
        free_irq(pm8001_ha->irq, sha);
@@ -763,13 +922,14 @@ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state)
                printk(KERN_ERR " PCI PM not supported\n");
                return -ENODEV;
        }
-       PM8001_CHIP_DISP->interrupt_disable(pm8001_ha);
-       PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
+       PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
+       PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
 #ifdef PM8001_USE_MSIX
        for (i = 0; i < pm8001_ha->number_of_intr; i++)
                synchronize_irq(pm8001_ha->msix_entries[i].vector);
        for (i = 0; i < pm8001_ha->number_of_intr; i++)
-               free_irq(pm8001_ha->msix_entries[i].vector, sha);
+               free_irq(pm8001_ha->msix_entries[i].vector,
+                               &pm8001_ha->outq[i]);
        pci_disable_msix(pdev);
 #else
        free_irq(pm8001_ha->irq, sha);
@@ -798,6 +958,7 @@ static int pm8001_pci_resume(struct pci_dev *pdev)
        struct sas_ha_struct *sha = pci_get_drvdata(pdev);
        struct pm8001_hba_info *pm8001_ha;
        int rc;
+       u8 i = 0;
        u32 device_state;
        pm8001_ha = sha->lldd_ha;
        device_state = pdev->current_state;
@@ -820,19 +981,33 @@ static int pm8001_pci_resume(struct pci_dev *pdev)
        if (rc)
                goto err_out_disable;
 
-       PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
+       /* chip soft reset only for SPC */
+       if (pm8001_ha->chip_id == chip_8001) {
+               PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
+               PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("chip soft reset successful\n"));
+       }
        rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
        if (rc)
                goto err_out_disable;
-       PM8001_CHIP_DISP->interrupt_disable(pm8001_ha);
+
+       /* disable all the interrupt bits */
+       PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF);
+
        rc = pm8001_request_irq(pm8001_ha);
        if (rc)
                goto err_out_disable;
-       #ifdef PM8001_USE_TASKLET
+#ifdef PM8001_USE_TASKLET
+       /* default tasklet for non msi-x interrupt handler/first msi-x
+        * interrupt handler */
        tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet,
-                   (unsigned long)pm8001_ha);
-       #endif
-       PM8001_CHIP_DISP->interrupt_enable(pm8001_ha);
+                       (unsigned long)pm8001_ha);
+#endif
+       PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0);
+       if (pm8001_ha->chip_id != chip_8001) {
+               for (i = 1; i < pm8001_ha->number_of_intr; i++)
+                       PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i);
+       }
        scsi_unblock_requests(pm8001_ha->shost);
        return 0;
 
@@ -843,14 +1018,45 @@ err_out_enable:
        return rc;
 }
 
+/* PCI device id, vendor id and driver data carry a unique
+ * value for each of the supported controllers
+ */
 static struct pci_device_id pm8001_pci_table[] = {
-       {
-               PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001
-       },
+       { PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 },
        {
                PCI_DEVICE(0x117c, 0x0042),
                .driver_data = chip_8001
        },
+       /* Support for SPC/SPCv/SPCve controllers */
+       { PCI_VDEVICE(ADAPTEC2, 0x8001), chip_8001 },
+       { PCI_VDEVICE(PMC_Sierra, 0x8008), chip_8008 },
+       { PCI_VDEVICE(ADAPTEC2, 0x8008), chip_8008 },
+       { PCI_VDEVICE(PMC_Sierra, 0x8018), chip_8018 },
+       { PCI_VDEVICE(ADAPTEC2, 0x8018), chip_8018 },
+       { PCI_VDEVICE(PMC_Sierra, 0x8009), chip_8009 },
+       { PCI_VDEVICE(ADAPTEC2, 0x8009), chip_8009 },
+       { PCI_VDEVICE(PMC_Sierra, 0x8019), chip_8019 },
+       { PCI_VDEVICE(ADAPTEC2, 0x8019), chip_8019 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8081,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0400, 0, 0, chip_8001 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8081,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8001 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8088,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8008 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8088,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8008 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8089,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8009 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8089,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8009 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8088,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8018 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8088,
+               PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8018 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8089,
+               PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8019 },
+       { PCI_VENDOR_ID_ADAPTEC2, 0x8089,
+               PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8019 },
        {} /* terminate list */
 };
 
@@ -870,7 +1076,7 @@ static int __init pm8001_init(void)
 {
        int rc = -ENOMEM;
 
-       pm8001_wq = alloc_workqueue("pm8001", 0, 0);
+       pm8001_wq = alloc_workqueue("pm80xx", 0, 0);
        if (!pm8001_wq)
                goto err;
 
@@ -902,7 +1108,8 @@ module_init(pm8001_init);
 module_exit(pm8001_exit);
 
 MODULE_AUTHOR("Jack Wang <jack_wang@usish.com>");
-MODULE_DESCRIPTION("PMC-Sierra PM8001 SAS/SATA controller driver");
+MODULE_DESCRIPTION(
+               "PMC-Sierra PM8001/8081/8088/8089 SAS/SATA controller driver");
 MODULE_VERSION(DRV_VERSION);
 MODULE_LICENSE("GPL");
 MODULE_DEVICE_TABLE(pci, pm8001_pci_table);
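
As a side note on how the table above is consumed: the driver_data member travels with the matched pci_device_id into probe(), which is why pm8001_pci_alloc() can take ent and read ent->driver_data as the chip index. Below is a self-contained sketch of that lookup pattern (the vendor/device values and chip names are examples only, not an authoritative list).

    #include <stdio.h>

    enum chip { chip_8001, chip_8008, chip_8009 };

    struct id_entry {
            unsigned short vendor, device;
            unsigned long driver_data;              /* chip index */
    };

    static const struct id_entry table[] = {
            { 0x11f8, 0x8001, chip_8001 },          /* example: SPC   */
            { 0x11f8, 0x8008, chip_8008 },          /* example: SPCv  */
            { 0x11f8, 0x8009, chip_8009 },          /* example: SPCve */
            { 0, 0, 0 }                             /* terminate list */
    };

    int main(void)
    {
            unsigned short dev = 0x8008;            /* pretend probe() matched this id */
            const struct id_entry *e;

            for (e = table; e->vendor; e++)
                    if (e->device == dev)
                            printf("device 0x%x -> chip index %lu\n",
                                   dev, e->driver_data);
            return 0;
    }
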
index b961112395d5e053145ba6f83ed4bf7e5058c5c5..a85d73de7c80c730260362ae56a7688189459ee6 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
+ * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
  *
  * Copyright (c) 2008-2009 USI Co., Ltd.
  * All rights reserved.
@@ -68,7 +68,7 @@ static void pm8001_tag_clear(struct pm8001_hba_info *pm8001_ha, u32 tag)
        clear_bit(tag, bitmap);
 }
 
-static void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
+void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
 {
        pm8001_tag_clear(pm8001_ha, tag);
 }
@@ -212,10 +212,12 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
                break;
        case PHY_FUNC_GET_EVENTS:
                spin_lock_irqsave(&pm8001_ha->lock, flags);
-               if (-1 == pm8001_bar4_shift(pm8001_ha,
+               if (pm8001_ha->chip_id == chip_8001) {
+                       if (-1 == pm8001_bar4_shift(pm8001_ha,
                                        (phy_id < 4) ? 0x30000 : 0x40000)) {
-                       spin_unlock_irqrestore(&pm8001_ha->lock, flags);
-                       return -EINVAL;
+                               spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+                               return -EINVAL;
+                       }
                }
                {
                        struct sas_phy *phy = sas_phy->phy;
@@ -228,7 +230,8 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
                        phy->loss_of_dword_sync_count = qp[3];
                        phy->phy_reset_problem_count = qp[4];
                }
-               pm8001_bar4_shift(pm8001_ha, 0);
+               if (pm8001_ha->chip_id == chip_8001)
+                       pm8001_bar4_shift(pm8001_ha, 0);
                spin_unlock_irqrestore(&pm8001_ha->lock, flags);
                return 0;
        default:
@@ -249,7 +252,9 @@ void pm8001_scan_start(struct Scsi_Host *shost)
        struct pm8001_hba_info *pm8001_ha;
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
        pm8001_ha = sha->lldd_ha;
-       PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
+       /* SAS_RE_INITIALIZATION not available in SPCv/ve */
+       if (pm8001_ha->chip_id == chip_8001)
+               PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
        for (i = 0; i < pm8001_ha->chip->n_phy; ++i)
                PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
 }
@@ -352,7 +357,7 @@ static int sas_find_local_port_id(struct domain_device *dev)
   * @tmf: the task management IU
   */
 #define DEV_IS_GONE(pm8001_dev)        \
-       ((!pm8001_dev || (pm8001_dev->dev_type == NO_DEVICE)))
+       ((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)))
 static int pm8001_task_exec(struct sas_task *task, const int num,
        gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf)
 {
@@ -370,7 +375,7 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
                struct task_status_struct *tsm = &t->task_status;
                tsm->resp = SAS_TASK_UNDELIVERED;
                tsm->stat = SAS_PHY_DOWN;
-               if (dev->dev_type != SATA_DEV)
+               if (dev->dev_type != SAS_SATA_DEV)
                        t->task_done(t);
                return 0;
        }
@@ -548,7 +553,7 @@ struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
 {
        u32 dev;
        for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
-               if (pm8001_ha->devices[dev].dev_type == NO_DEVICE) {
+               if (pm8001_ha->devices[dev].dev_type == SAS_PHY_UNUSED) {
                        pm8001_ha->devices[dev].id = dev;
                        return &pm8001_ha->devices[dev];
                }
@@ -560,13 +565,31 @@ struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
        }
        return NULL;
 }
+/**
+ * pm8001_find_dev - find a matching pm8001_device
+ * @pm8001_ha: our hba card information
+ * @device_id: device id of the device to look up
+ */
+struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
+                                       u32 device_id)
+{
+       u32 dev;
+       for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
+               if (pm8001_ha->devices[dev].device_id == device_id)
+                       return &pm8001_ha->devices[dev];
+       }
+       if (dev == PM8001_MAX_DEVICES) {
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("NO MATCHING "
+                               "DEVICE FOUND !!!\n"));
+       }
+       return NULL;
+}
 
 static void pm8001_free_dev(struct pm8001_device *pm8001_dev)
 {
        u32 id = pm8001_dev->id;
        memset(pm8001_dev, 0, sizeof(*pm8001_dev));
        pm8001_dev->id = id;
-       pm8001_dev->dev_type = NO_DEVICE;
+       pm8001_dev->dev_type = SAS_PHY_UNUSED;
        pm8001_dev->device_id = PM8001_MAX_DEVICES;
        pm8001_dev->sas_device = NULL;
 }
@@ -624,7 +647,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
                        res = -1;
                }
        } else {
-               if (dev->dev_type == SATA_DEV) {
+               if (dev->dev_type == SAS_SATA_DEV) {
                        pm8001_device->attached_phy =
                                dev->rphy->identify.phy_identifier;
                                flag = 1; /* directly sata*/
@@ -634,7 +657,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
        PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag);
        spin_unlock_irqrestore(&pm8001_ha->lock, flags);
        wait_for_completion(&completion);
-       if (dev->dev_type == SAS_END_DEV)
+       if (dev->dev_type == SAS_END_DEVICE)
                msleep(50);
        pm8001_ha->flags = PM8001F_RUN_TIME;
        return 0;
@@ -648,7 +671,7 @@ int pm8001_dev_found(struct domain_device *dev)
        return pm8001_dev_found_notify(dev);
 }
 
-static void pm8001_task_done(struct sas_task *task)
+void pm8001_task_done(struct sas_task *task)
 {
        if (!del_timer(&task->slow_task->timer))
                return;
@@ -904,7 +927,7 @@ void pm8001_open_reject_retry(
                struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i];
 
                pm8001_dev = ccb->device;
-               if (!pm8001_dev || (pm8001_dev->dev_type == NO_DEVICE))
+               if (!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))
                        continue;
                if (!device_to_close) {
                        uintptr_t d = (uintptr_t)pm8001_dev
@@ -995,6 +1018,72 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev)
        return rc;
 }
 
+/*
+ * This function handles the IT_NEXUS_XXX event or completion
+ * status code for SSP/SATA/SMP I/O requests.
+ */
+int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
+{
+       int rc = TMF_RESP_FUNC_FAILED;
+       struct pm8001_device *pm8001_dev;
+       struct pm8001_hba_info *pm8001_ha;
+       struct sas_phy *phy;
+       u32 device_id = 0;
+
+       if (!dev || !dev->lldd_dev)
+               return -1;
+
+       pm8001_dev = dev->lldd_dev;
+       device_id = pm8001_dev->device_id;
+       pm8001_ha = pm8001_find_ha_by_dev(dev);
+
+       PM8001_EH_DBG(pm8001_ha,
+                       pm8001_printk("I_T_Nexus handler invoked !!"));
+
+       phy = sas_get_local_phy(dev);
+
+       if (dev_is_sata(dev)) {
+               DECLARE_COMPLETION_ONSTACK(completion_setstate);
+               if (scsi_is_sas_phy_local(phy)) {
+                       rc = 0;
+                       goto out;
+               }
+               /* send internal ssp/sata/smp abort command to FW */
+               rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
+                                                       dev, 1, 0);
+               msleep(100);
+
+               /* deregister the target device */
+               pm8001_dev_gone_notify(dev);
+               msleep(200);
+
+               /* send phy reset to hard reset the target */
+               rc = sas_phy_reset(phy, 1);
+               msleep(2000);
+               pm8001_dev->setds_completion = &completion_setstate;
+
+               wait_for_completion(&completion_setstate);
+       } else {
+               /* send internal ssp/sata/smp abort command to FW */
+               rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
+                                                       dev, 1, 0);
+               msleep(100);
+
+               /* deregister the target device */
+               pm8001_dev_gone_notify(dev);
+               msleep(200);
+
+               /* send phy reset to hard reset the target */
+               rc = sas_phy_reset(phy, 1);
+               msleep(2000);
+       }
+       PM8001_EH_DBG(pm8001_ha, pm8001_printk(" for device[%x]:rc=%d\n",
+               pm8001_dev->device_id, rc));
+out:
+       sas_put_local_phy(phy);
+
+       return rc;
+}
 /* mandatory SAM-3, the task reset the specified LUN*/
 int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
 {
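
The dev_type rename above also makes the device-table helpers easier to read: a fixed array of device slots is scanned linearly, with an "unused" sentinel marking free entries. The following self-contained sketch shows that pattern (MAX_DEVICES, DEV_UNUSED and the field names are illustrative stand-ins, not the driver's definitions).

    #include <stdio.h>

    #define MAX_DEVICES 8
    #define DEV_UNUSED  0                   /* sentinel, like SAS_PHY_UNUSED */

    struct dev_slot {
            int type;                       /* DEV_UNUSED while the slot is free */
            unsigned int id;                /* slot index */
            unsigned int device_id;         /* firmware-assigned id */
    };

    static struct dev_slot devices[MAX_DEVICES];

    /* claim the first free slot, as pm8001_alloc_dev() does */
    static struct dev_slot *alloc_dev(void)
    {
            unsigned int i;

            for (i = 0; i < MAX_DEVICES; i++)
                    if (devices[i].type == DEV_UNUSED) {
                            devices[i].id = i;
                            return &devices[i];
                    }
            return NULL;                    /* table full */
    }

    /* look a slot up by its device_id, as pm8001_find_dev() does */
    static struct dev_slot *find_dev(unsigned int device_id)
    {
            unsigned int i;

            for (i = 0; i < MAX_DEVICES; i++)
                    if (devices[i].device_id == device_id)
                            return &devices[i];
            return NULL;
    }

    int main(void)
    {
            struct dev_slot *d = alloc_dev();

            if (d) {
                    d->type = 1;            /* mark the slot as in use */
                    d->device_id = 0x42;
            }
            printf("lookup 0x42 -> slot %p\n", (void *)find_dev(0x42));
            return 0;
    }
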
index 11008205aeb331c9b86be09deb8c26733cd133b2..570819464d90b40568477eac1e4c283ae10c3169 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
+ * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
  *
  * Copyright (c) 2008-2009 USI Co., Ltd.
  * All rights reserved.
@@ -57,8 +57,8 @@
 #include <linux/atomic.h>
 #include "pm8001_defs.h"
 
-#define DRV_NAME               "pm8001"
-#define DRV_VERSION            "0.1.36"
+#define DRV_NAME               "pm80xx"
+#define DRV_VERSION            "0.1.37"
 #define PM8001_FAIL_LOGGING    0x01 /* Error message logging */
 #define PM8001_INIT_LOGGING    0x02 /* driver init logging */
 #define PM8001_DISC_LOGGING    0x04 /* discovery layer logging */
@@ -66,8 +66,8 @@
 #define PM8001_EH_LOGGING      0x10 /* libsas EH function logging*/
 #define PM8001_IOCTL_LOGGING   0x20 /* IOCTL message logging */
 #define PM8001_MSG_LOGGING     0x40 /* misc message logging */
-#define pm8001_printk(format, arg...)  printk(KERN_INFO "%s %d:" format,\
-                               __func__, __LINE__, ## arg)
+#define pm8001_printk(format, arg...)  printk(KERN_INFO "pm80xx %s %d:" \
+                       format, __func__, __LINE__, ## arg)
 #define PM8001_CHECK_LOGGING(HBA, LEVEL, CMD)  \
 do {                                           \
        if (unlikely(HBA->logging_level & LEVEL))       \
@@ -103,11 +103,12 @@ do {                                              \
 #define PM8001_READ_VPD
 
 
-#define DEV_IS_EXPANDER(type)  ((type == EDGE_DEV) || (type == FANOUT_DEV))
+#define DEV_IS_EXPANDER(type)  ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE))
 
 #define PM8001_NAME_LENGTH             32/* generic length of strings */
 extern struct list_head hba_list;
 extern const struct pm8001_dispatch pm8001_8001_dispatch;
+extern const struct pm8001_dispatch pm8001_80xx_dispatch;
 
 struct pm8001_hba_info;
 struct pm8001_ccb_info;
@@ -131,15 +132,15 @@ struct pm8001_ioctl_payload {
 struct pm8001_dispatch {
        char *name;
        int (*chip_init)(struct pm8001_hba_info *pm8001_ha);
-       int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha, u32 signature);
+       int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha);
        void (*chip_rst)(struct pm8001_hba_info *pm8001_ha);
        int (*chip_ioremap)(struct pm8001_hba_info *pm8001_ha);
        void (*chip_iounmap)(struct pm8001_hba_info *pm8001_ha);
-       irqreturn_t (*isr)(struct pm8001_hba_info *pm8001_ha);
+       irqreturn_t (*isr)(struct pm8001_hba_info *pm8001_ha, u8 vec);
        u32 (*is_our_interupt)(struct pm8001_hba_info *pm8001_ha);
-       int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha);
-       void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha);
-       void (*interrupt_disable)(struct pm8001_hba_info *pm8001_ha);
+       int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha, u8 vec);
+       void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha, u8 vec);
+       void (*interrupt_disable)(struct pm8001_hba_info *pm8001_ha, u8 vec);
        void (*make_prd)(struct scatterlist *scatter, int nr, void *prd);
        int (*smp_req)(struct pm8001_hba_info *pm8001_ha,
                struct pm8001_ccb_info *ccb);
@@ -173,6 +174,7 @@ struct pm8001_dispatch {
 };
 
 struct pm8001_chip_info {
+       u32     encrypt;
        u32     n_phy;
        const struct pm8001_dispatch    *dispatch;
 };
@@ -204,7 +206,7 @@ struct pm8001_phy {
 };
 
 struct pm8001_device {
-       enum sas_dev_type       dev_type;
+       enum sas_device_type    dev_type;
        struct domain_device    *sas_device;
        u32                     attached_phy;
        u32                     id;
@@ -256,7 +258,20 @@ struct mpi_mem_req {
        struct mpi_mem          region[USI_MAX_MEMCNT];
 };
 
-struct main_cfg_table {
+struct encrypt {
+       u32     cipher_mode;
+       u32     sec_mode;
+       u32     status;
+       u32     flag;
+};
+
+struct sas_phy_attribute_table {
+       u32     phystart1_16[16];
+       u32     outbound_hw_event_pid1_16[16];
+};
+
+union main_cfg_table {
+       struct {
        u32                     signature;
        u32                     interface_rev;
        u32                     firmware_rev;
@@ -292,19 +307,69 @@ struct main_cfg_table {
        u32                     fatal_err_dump_length1;
        u32                     hda_mode_flag;
        u32                     anolog_setup_table_offset;
+       u32                     rsvd[4];
+       } pm8001_tbl;
+
+       struct {
+       u32                     signature;
+       u32                     interface_rev;
+       u32                     firmware_rev;
+       u32                     max_out_io;
+       u32                     max_sgl;
+       u32                     ctrl_cap_flag;
+       u32                     gst_offset;
+       u32                     inbound_queue_offset;
+       u32                     outbound_queue_offset;
+       u32                     inbound_q_nppd_hppd;
+       u32                     rsvd[8];
+       u32                     crc_core_dump;
+       u32                     rsvd1;
+       u32                     upper_event_log_addr;
+       u32                     lower_event_log_addr;
+       u32                     event_log_size;
+       u32                     event_log_severity;
+       u32                     upper_pcs_event_log_addr;
+       u32                     lower_pcs_event_log_addr;
+       u32                     pcs_event_log_size;
+       u32                     pcs_event_log_severity;
+       u32                     fatal_err_interrupt;
+       u32                     fatal_err_dump_offset0;
+       u32                     fatal_err_dump_length0;
+       u32                     fatal_err_dump_offset1;
+       u32                     fatal_err_dump_length1;
+       u32                     gpio_led_mapping;
+       u32                     analog_setup_table_offset;
+       u32                     int_vec_table_offset;
+       u32                     phy_attr_table_offset;
+       u32                     port_recovery_timer;
+       u32                     interrupt_reassertion_delay;
+       } pm80xx_tbl;
 };
-struct general_status_table {
+
+union general_status_table {
+       struct {
        u32                     gst_len_mpistate;
        u32                     iq_freeze_state0;
        u32                     iq_freeze_state1;
        u32                     msgu_tcnt;
        u32                     iop_tcnt;
-       u32                     reserved;
+       u32                     rsvd;
        u32                     phy_state[8];
-       u32                     reserved1;
-       u32                     reserved2;
-       u32                     reserved3;
+       u32                     gpio_input_val;
+       u32                     rsvd1[2];
+       u32                     recover_err_info[8];
+       } pm8001_tbl;
+       struct {
+       u32                     gst_len_mpistate;
+       u32                     iq_freeze_state0;
+       u32                     iq_freeze_state1;
+       u32                     msgu_tcnt;
+       u32                     iop_tcnt;
+       u32                     rsvd[9];
+       u32                     gpio_input_val;
+       u32                     rsvd1[2];
        u32                     recover_err_info[8];
+       } pm80xx_tbl;
 };
 struct inbound_queue_table {
        u32                     element_pri_size_cnt;
@@ -351,15 +416,21 @@ struct pm8001_hba_info {
        struct device           *dev;
        struct pm8001_hba_memspace io_mem[6];
        struct mpi_mem_req      memoryMap;
+       struct encrypt          encrypt_info; /* support encryption */
        void __iomem    *msg_unit_tbl_addr;/*Message Unit Table Addr*/
        void __iomem    *main_cfg_tbl_addr;/*Main Config Table Addr*/
        void __iomem    *general_stat_tbl_addr;/*General Status Table Addr*/
        void __iomem    *inbnd_q_tbl_addr;/*Inbound Queue Config Table Addr*/
        void __iomem    *outbnd_q_tbl_addr;/*Outbound Queue Config Table Addr*/
-       struct main_cfg_table   main_cfg_tbl;
-       struct general_status_table     gs_tbl;
-       struct inbound_queue_table      inbnd_q_tbl[PM8001_MAX_INB_NUM];
-       struct outbound_queue_table     outbnd_q_tbl[PM8001_MAX_OUTB_NUM];
+       void __iomem    *pspa_q_tbl_addr;
+                       /*MPI SAS PHY attributes Queue Config Table Addr*/
+       void __iomem    *ivt_tbl_addr; /*MPI IVT Table Addr */
+       union main_cfg_table    main_cfg_tbl;
+       union general_status_table      gs_tbl;
+       struct inbound_queue_table      inbnd_q_tbl[PM8001_MAX_SPCV_INB_NUM];
+       struct outbound_queue_table     outbnd_q_tbl[PM8001_MAX_SPCV_OUTB_NUM];
+       struct sas_phy_attribute_table  phy_attr_table;
+                                       /* MPI SAS PHY attributes */
        u8                      sas_addr[SAS_ADDR_SIZE];
        struct sas_ha_struct    *sas;/* SCSI/SAS glue */
        struct Scsi_Host        *shost;
@@ -372,10 +443,12 @@ struct pm8001_hba_info {
        struct pm8001_port      port[PM8001_MAX_PHYS];
        u32                     id;
        u32                     irq;
+       u32                     iomb_size; /* SPC and SPCV IOMB size */
        struct pm8001_device    *devices;
        struct pm8001_ccb_info  *ccb_info;
 #ifdef PM8001_USE_MSIX
-       struct msix_entry       msix_entries[16];/*for msi-x interrupt*/
+       struct msix_entry       msix_entries[PM8001_MAX_MSIX_VEC];
+                                       /*for msi-x interrupt*/
        int                     number_of_intr;/*will be used in remove()*/
 #endif
 #ifdef PM8001_USE_TASKLET
@@ -383,7 +456,10 @@ struct pm8001_hba_info {
 #endif
        u32                     logging_level;
        u32                     fw_status;
+       u32                     smp_exp_mode;
+       u32                     int_vector;
        const struct firmware   *fw_image;
+       u8                      outq[PM8001_MAX_MSIX_VEC];
 };
 
 struct pm8001_work {
@@ -419,6 +495,9 @@ struct pm8001_fw_image_header {
 #define FLASH_UPDATE_DNLD_NOT_SUPPORTED                0x10
 #define FLASH_UPDATE_DISABLED                  0x11
 
+#define        NCQ_READ_LOG_FLAG                       0x80000000
+#define        NCQ_ABORT_ALL_FLAG                      0x40000000
+#define        NCQ_2ND_RLE_FLAG                        0x20000000
 /**
  * brief param structure for firmware flash update.
  */
@@ -484,6 +563,7 @@ int pm8001_dev_found(struct domain_device *dev);
 void pm8001_dev_gone(struct domain_device *dev);
 int pm8001_lu_reset(struct domain_device *dev, u8 *lun);
 int pm8001_I_T_nexus_reset(struct domain_device *dev);
+int pm8001_I_T_nexus_event_handler(struct domain_device *dev);
 int pm8001_query_task(struct sas_task *task);
 void pm8001_open_reject_retry(
        struct pm8001_hba_info *pm8001_ha,
@@ -493,6 +573,61 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
        dma_addr_t *pphys_addr, u32 *pphys_addr_hi, u32 *pphys_addr_lo,
        u32 mem_size, u32 align);
 
+void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha);
+int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
+                       struct inbound_queue_table *circularQ,
+                       u32 opCode, void *payload, u32 responseQueue);
+int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ,
+                               u16 messageSize, void **messagePtr);
+u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
+                       struct outbound_queue_table *circularQ, u8 bc);
+u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
+                       struct outbound_queue_table *circularQ,
+                       void **messagePtr1, u8 *pBC);
+int pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,
+                       struct pm8001_device *pm8001_dev, u32 state);
+int pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
+                                       void *payload);
+int pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
+                                       void *fw_flash_updata_info, u32 tag);
+int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload);
+int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload);
+int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,
+                               struct pm8001_ccb_info *ccb,
+                               struct pm8001_tmf_task *tmf);
+int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha,
+                               struct pm8001_device *pm8001_dev,
+                               u8 flag, u32 task_tag, u32 cmd_tag);
+int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, u32 device_id);
+void pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd);
+void pm8001_work_fn(struct work_struct *work);
+int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha,
+                                       void *data, int handler);
+void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha,
+                                                       void *piomb);
+void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha,
+                                                       void *piomb);
+void pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha,
+                                                       void *piomb);
+int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha,
+                                                       void *piomb);
+void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate);
+void pm8001_get_attached_sas_addr(struct pm8001_phy *phy, u8 *sas_addr);
+void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i);
+int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
+int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
+int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha,
+                                                       void *piomb);
+int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb);
+int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
+struct sas_task *pm8001_alloc_task(void);
+void pm8001_task_done(struct sas_task *task);
+void pm8001_free_task(struct sas_task *task);
+void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag);
+struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
+                                       u32 device_id);
+int pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha);
+
 int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue);
 
 /* ctl shared API */
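
For readers following the pm8001_80xx_dispatch addition and the widened isr/interrupt_enable/interrupt_disable prototypes above, the underlying pattern is a per-chip table of function pointers selected once at probe time. Here is a minimal sketch of that dispatch idea (the ops shown and their names are illustrative, not the driver's full interface).

    #include <stdio.h>

    struct hba;                             /* opaque controller handle */

    struct dispatch {
            const char *name;
            void (*interrupt_enable)(struct hba *ha, unsigned char vec);
    };

    struct hba {
            const struct dispatch *ops;     /* selected once, used everywhere */
    };

    static void spc_int_enable(struct hba *ha, unsigned char vec)
    {
            (void)ha;
            printf("SPC: enable the single interrupt (vec %u ignored)\n", vec);
    }

    static void spcv_int_enable(struct hba *ha, unsigned char vec)
    {
            (void)ha;
            printf("SPCv: enable MSI-X vector %u\n", vec);
    }

    static const struct dispatch spc_ops  = { "pm8001", spc_int_enable };
    static const struct dispatch spcv_ops = { "pm80xx", spcv_int_enable };

    int main(void)
    {
            struct hba ha = { .ops = &spcv_ops };   /* chip chosen at probe time */
            unsigned char vec;

            for (vec = 0; vec < 2; vec++)
                    ha.ops->interrupt_enable(&ha, vec);

            (void)spc_ops;                  /* an SPC board would pick this table instead */
            return 0;
    }
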
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
new file mode 100644 (file)
index 0000000..302514d
--- /dev/null
@@ -0,0 +1,4130 @@
+/*
+ * PMC-Sierra SPCv/ve 8088/8089 SAS/SATA based host adapters driver
+ *
+ * Copyright (c) 2008-2009 PMC-Sierra, Inc.,
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ */
+#include <linux/slab.h>
+#include "pm8001_sas.h"
+#include "pm80xx_hwi.h"
+#include "pm8001_chips.h"
+#include "pm8001_ctl.h"
+
+#define SMP_DIRECT 1
+#define SMP_INDIRECT 2
+/**
+ * read_main_config_table - read the main configuration table and save it.
+ * @pm8001_ha: our hba card information
+ */
+static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
+{
+       void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
+
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.signature    =
+               pm8001_mr32(address, MAIN_SIGNATURE_OFFSET);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev =
+               pm8001_mr32(address, MAIN_INTERFACE_REVISION);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev =
+               pm8001_mr32(address, MAIN_FW_REVISION);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io   =
+               pm8001_mr32(address, MAIN_MAX_OUTSTANDING_IO_OFFSET);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl      =
+               pm8001_mr32(address, MAIN_MAX_SGL_OFFSET);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.ctrl_cap_flag =
+               pm8001_mr32(address, MAIN_CNTRL_CAP_OFFSET);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.gst_offset   =
+               pm8001_mr32(address, MAIN_GST_OFFSET);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_queue_offset =
+               pm8001_mr32(address, MAIN_IBQ_OFFSET);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.outbound_queue_offset =
+               pm8001_mr32(address, MAIN_OBQ_OFFSET);
+
+       /* read Error Dump Offset and Length */
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_offset0 =
+               pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_length0 =
+               pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_offset1 =
+               pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_length1 =
+               pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH);
+
+       /* read GPIO LED settings from the configuration table */
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping =
+               pm8001_mr32(address, MAIN_GPIO_LED_FLAGS_OFFSET);
+
+       /* read analog Setting offset from the configuration table */
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.analog_setup_table_offset =
+               pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET);
+
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.int_vec_table_offset =
+               pm8001_mr32(address, MAIN_INT_VECTOR_TABLE_OFFSET);
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.phy_attr_table_offset =
+               pm8001_mr32(address, MAIN_SAS_PHY_ATTR_TABLE_OFFSET);
+}
+
+/**
+ * read_general_status_table - read the general status table and save it.
+ * @pm8001_ha: our hba card information
+ */
+static void read_general_status_table(struct pm8001_hba_info *pm8001_ha)
+{
+       void __iomem *address = pm8001_ha->general_stat_tbl_addr;
+       pm8001_ha->gs_tbl.pm80xx_tbl.gst_len_mpistate   =
+                       pm8001_mr32(address, GST_GSTLEN_MPIS_OFFSET);
+       pm8001_ha->gs_tbl.pm80xx_tbl.iq_freeze_state0   =
+                       pm8001_mr32(address, GST_IQ_FREEZE_STATE0_OFFSET);
+       pm8001_ha->gs_tbl.pm80xx_tbl.iq_freeze_state1   =
+                       pm8001_mr32(address, GST_IQ_FREEZE_STATE1_OFFSET);
+       pm8001_ha->gs_tbl.pm80xx_tbl.msgu_tcnt          =
+                       pm8001_mr32(address, GST_MSGUTCNT_OFFSET);
+       pm8001_ha->gs_tbl.pm80xx_tbl.iop_tcnt           =
+                       pm8001_mr32(address, GST_IOPTCNT_OFFSET);
+       pm8001_ha->gs_tbl.pm80xx_tbl.gpio_input_val     =
+                       pm8001_mr32(address, GST_GPIO_INPUT_VAL);
+       pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[0] =
+                       pm8001_mr32(address, GST_RERRINFO_OFFSET0);
+       pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[1] =
+                       pm8001_mr32(address, GST_RERRINFO_OFFSET1);
+       pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[2] =
+                       pm8001_mr32(address, GST_RERRINFO_OFFSET2);
+       pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[3] =
+                       pm8001_mr32(address, GST_RERRINFO_OFFSET3);
+       pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[4] =
+                       pm8001_mr32(address, GST_RERRINFO_OFFSET4);
+       pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[5] =
+                       pm8001_mr32(address, GST_RERRINFO_OFFSET5);
+       pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[6] =
+                       pm8001_mr32(address, GST_RERRINFO_OFFSET6);
+       pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[7] =
+                        pm8001_mr32(address, GST_RERRINFO_OFFSET7);
+}
+/**
+ * read_phy_attr_table - read the phy attribute table and save it.
+ * @pm8001_ha: our hba card information
+ */
+static void read_phy_attr_table(struct pm8001_hba_info *pm8001_ha)
+{
+       void __iomem *address = pm8001_ha->pspa_q_tbl_addr;
+       pm8001_ha->phy_attr_table.phystart1_16[0] =
+                       pm8001_mr32(address, PSPA_PHYSTATE0_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[1] =
+                       pm8001_mr32(address, PSPA_PHYSTATE1_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[2] =
+                       pm8001_mr32(address, PSPA_PHYSTATE2_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[3] =
+                       pm8001_mr32(address, PSPA_PHYSTATE3_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[4] =
+                       pm8001_mr32(address, PSPA_PHYSTATE4_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[5] =
+                       pm8001_mr32(address, PSPA_PHYSTATE5_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[6] =
+                       pm8001_mr32(address, PSPA_PHYSTATE6_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[7] =
+                       pm8001_mr32(address, PSPA_PHYSTATE7_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[8] =
+                       pm8001_mr32(address, PSPA_PHYSTATE8_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[9] =
+                       pm8001_mr32(address, PSPA_PHYSTATE9_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[10] =
+                       pm8001_mr32(address, PSPA_PHYSTATE10_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[11] =
+                       pm8001_mr32(address, PSPA_PHYSTATE11_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[12] =
+                       pm8001_mr32(address, PSPA_PHYSTATE12_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[13] =
+                       pm8001_mr32(address, PSPA_PHYSTATE13_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[14] =
+                       pm8001_mr32(address, PSPA_PHYSTATE14_OFFSET);
+       pm8001_ha->phy_attr_table.phystart1_16[15] =
+                       pm8001_mr32(address, PSPA_PHYSTATE15_OFFSET);
+
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[0] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID0_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[1] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID1_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[2] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID2_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[3] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID3_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[4] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID4_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[5] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID5_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[6] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID6_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[7] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID7_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[8] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID8_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[9] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID9_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[10] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID10_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[11] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID11_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[12] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID12_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[13] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID13_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[14] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID14_OFFSET);
+       pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[15] =
+                       pm8001_mr32(address, PSPA_OB_HW_EVENT_PID15_OFFSET);
+
+}
+
+/**
+ * read_inbnd_queue_table - read the inbound queue table and save it.
+ * @pm8001_ha: our hba card information
+ */
+static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
+{
+       int i;
+       void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
+       for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) {
+               u32 offset = i * 0x20;
+               pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
+                       get_pci_bar_index(pm8001_mr32(address,
+                               (offset + IB_PIPCI_BAR)));
+               pm8001_ha->inbnd_q_tbl[i].pi_offset =
+                       pm8001_mr32(address, (offset + IB_PIPCI_BAR_OFFSET));
+       }
+}
+
+/**
+ * read_outbnd_queue_table - read the outbound queue table and save it.
+ * @pm8001_ha: our hba card information
+ */
+static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
+{
+       int i;
+       void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
+       for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
+               u32 offset = i * 0x24;
+               pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
+                       get_pci_bar_index(pm8001_mr32(address,
+                               (offset + OB_CIPCI_BAR)));
+               pm8001_ha->outbnd_q_tbl[i].ci_offset =
+                       pm8001_mr32(address, (offset + OB_CIPCI_BAR_OFFSET));
+       }
+}
+
+/**
+ * init_default_table_values - init the default table.
+ * @pm8001_ha: our hba card information
+ */
+static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
+{
+       int i;
+       u32 offsetib, offsetob;
+       void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr;
+       void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr;
+
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_event_log_addr         =
+               pm8001_ha->memoryMap.region[AAP1].phys_addr_hi;
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_event_log_addr         =
+               pm8001_ha->memoryMap.region[AAP1].phys_addr_lo;
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size               =
+                                                       PM8001_EVENT_LOG_SIZE;
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_severity           = 0x01;
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_pcs_event_log_addr     =
+               pm8001_ha->memoryMap.region[IOP].phys_addr_hi;
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_pcs_event_log_addr     =
+               pm8001_ha->memoryMap.region[IOP].phys_addr_lo;
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size           =
+                                                       PM8001_EVENT_LOG_SIZE;
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity       = 0x01;
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt          = 0x01;
+
+       /* Disable end to end CRC checking */
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump = (0x1 << 16);
+
+       for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) {
+               pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt  =
+                       PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30);
+               pm8001_ha->inbnd_q_tbl[i].upper_base_addr       =
+                       pm8001_ha->memoryMap.region[IB + i].phys_addr_hi;
+               pm8001_ha->inbnd_q_tbl[i].lower_base_addr       =
+               pm8001_ha->memoryMap.region[IB + i].phys_addr_lo;
+               pm8001_ha->inbnd_q_tbl[i].base_virt             =
+                       (u8 *)pm8001_ha->memoryMap.region[IB + i].virt_ptr;
+               pm8001_ha->inbnd_q_tbl[i].total_length          =
+                       pm8001_ha->memoryMap.region[IB + i].total_len;
+               pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr    =
+                       pm8001_ha->memoryMap.region[CI + i].phys_addr_hi;
+               pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr    =
+                       pm8001_ha->memoryMap.region[CI + i].phys_addr_lo;
+               pm8001_ha->inbnd_q_tbl[i].ci_virt               =
+                       pm8001_ha->memoryMap.region[CI + i].virt_ptr;
+               offsetib = i * 0x20;
+               pm8001_ha->inbnd_q_tbl[i].pi_pci_bar            =
+                       get_pci_bar_index(pm8001_mr32(addressib,
+                               (offsetib + 0x14)));
+               pm8001_ha->inbnd_q_tbl[i].pi_offset             =
+                       pm8001_mr32(addressib, (offsetib + 0x18));
+               pm8001_ha->inbnd_q_tbl[i].producer_idx          = 0;
+               pm8001_ha->inbnd_q_tbl[i].consumer_index        = 0;
+       }
+       for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
+               pm8001_ha->outbnd_q_tbl[i].element_size_cnt     =
+                       PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30);
+               pm8001_ha->outbnd_q_tbl[i].upper_base_addr      =
+                       pm8001_ha->memoryMap.region[OB + i].phys_addr_hi;
+               pm8001_ha->outbnd_q_tbl[i].lower_base_addr      =
+                       pm8001_ha->memoryMap.region[OB + i].phys_addr_lo;
+               pm8001_ha->outbnd_q_tbl[i].base_virt            =
+                       (u8 *)pm8001_ha->memoryMap.region[OB + i].virt_ptr;
+               pm8001_ha->outbnd_q_tbl[i].total_length         =
+                       pm8001_ha->memoryMap.region[OB + i].total_len;
+               pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr   =
+                       pm8001_ha->memoryMap.region[PI + i].phys_addr_hi;
+               pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr   =
+                       pm8001_ha->memoryMap.region[PI + i].phys_addr_lo;
+               /* interrupt vector based on oq */
+               pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay = (i << 24);
+               pm8001_ha->outbnd_q_tbl[i].pi_virt              =
+                       pm8001_ha->memoryMap.region[PI + i].virt_ptr;
+               offsetob = i * 0x24;
+               pm8001_ha->outbnd_q_tbl[i].ci_pci_bar           =
+                       get_pci_bar_index(pm8001_mr32(addressob,
+                       offsetob + 0x14));
+               pm8001_ha->outbnd_q_tbl[i].ci_offset            =
+                       pm8001_mr32(addressob, (offsetob + 0x18));
+               pm8001_ha->outbnd_q_tbl[i].consumer_idx         = 0;
+               pm8001_ha->outbnd_q_tbl[i].producer_index       = 0;
+       }
+}
+
+/**
+ * update_main_config_table - write the updated main configuration table to the HBA.
+ * @pm8001_ha: our hba card information
+ */
+static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
+{
+       void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
+       pm8001_mw32(address, MAIN_IQNPPD_HPPD_OFFSET,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_q_nppd_hppd);
+       pm8001_mw32(address, MAIN_EVENT_LOG_ADDR_HI,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_event_log_addr);
+       pm8001_mw32(address, MAIN_EVENT_LOG_ADDR_LO,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_event_log_addr);
+       pm8001_mw32(address, MAIN_EVENT_LOG_BUFF_SIZE,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size);
+       pm8001_mw32(address, MAIN_EVENT_LOG_OPTION,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_severity);
+       pm8001_mw32(address, MAIN_PCS_EVENT_LOG_ADDR_HI,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_pcs_event_log_addr);
+       pm8001_mw32(address, MAIN_PCS_EVENT_LOG_ADDR_LO,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_pcs_event_log_addr);
+       pm8001_mw32(address, MAIN_PCS_EVENT_LOG_BUFF_SIZE,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size);
+       pm8001_mw32(address, MAIN_PCS_EVENT_LOG_OPTION,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity);
+       pm8001_mw32(address, MAIN_FATAL_ERROR_INTERRUPT,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt);
+       pm8001_mw32(address, MAIN_EVENT_CRC_CHECK,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump);
+
+       /* SPCv specific */
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping &= 0xCFFFFFFF;
+       /* Set GPIOLED to 0x2 for LED indicator */
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping |= 0x20000000;
+       pm8001_mw32(address, MAIN_GPIO_LED_FLAGS_OFFSET,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping);
+
+       pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer);
+       pm8001_mw32(address, MAIN_INT_REASSERTION_DELAY,
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.interrupt_reassertion_delay);
+}
+
+/**
+ * update_inbnd_queue_table - write an inbound queue configuration to the HBA.
+ * @pm8001_ha: our hba card information
+ * @number: inbound queue number to update
+ */
+static void update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
+                                        int number)
+{
+       void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
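+       /* Each inbound queue configuration entry is 0x20 bytes wide. */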
+       u16 offset = number * 0x20;
+       pm8001_mw32(address, offset + IB_PROPERITY_OFFSET,
+               pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt);
+       pm8001_mw32(address, offset + IB_BASE_ADDR_HI_OFFSET,
+               pm8001_ha->inbnd_q_tbl[number].upper_base_addr);
+       pm8001_mw32(address, offset + IB_BASE_ADDR_LO_OFFSET,
+               pm8001_ha->inbnd_q_tbl[number].lower_base_addr);
+       pm8001_mw32(address, offset + IB_CI_BASE_ADDR_HI_OFFSET,
+               pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr);
+       pm8001_mw32(address, offset + IB_CI_BASE_ADDR_LO_OFFSET,
+               pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr);
+}
+
+/**
+ * update_outbnd_queue_table - write an outbound queue configuration to the HBA.
+ * @pm8001_ha: our hba card information
+ * @number: outbound queue number to update
+ */
+static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
+                                                int number)
+{
+       void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
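+       /* Each outbound queue configuration entry is 0x24 bytes wide. */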
+       u16 offset = number * 0x24;
+       pm8001_mw32(address, offset + OB_PROPERITY_OFFSET,
+               pm8001_ha->outbnd_q_tbl[number].element_size_cnt);
+       pm8001_mw32(address, offset + OB_BASE_ADDR_HI_OFFSET,
+               pm8001_ha->outbnd_q_tbl[number].upper_base_addr);
+       pm8001_mw32(address, offset + OB_BASE_ADDR_LO_OFFSET,
+               pm8001_ha->outbnd_q_tbl[number].lower_base_addr);
+       pm8001_mw32(address, offset + OB_PI_BASE_ADDR_HI_OFFSET,
+               pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr);
+       pm8001_mw32(address, offset + OB_PI_BASE_ADDR_LO_OFFSET,
+               pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr);
+       pm8001_mw32(address, offset + OB_INTERRUPT_COALES_OFFSET,
+               pm8001_ha->outbnd_q_tbl[number].interrup_vec_cnt_delay);
+}
+
+/**
+ * mpi_init_check - check firmware initialization status.
+ * @pm8001_ha: our hba card information
+ */
+static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
+{
+       u32 max_wait_count;
+       u32 value;
+       u32 gst_len_mpistate;
+
+       /* Write bit0=1 to the Inbound DoorBell Register to tell the SPC FW
+        * that the table has been updated.
+        */
+       pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_UPDATE);
+       /* wait until Inbound DoorBell Clear Register toggled */
+       max_wait_count = 2 * 1000 * 1000;/* 2 sec for spcv/ve */
+       do {
+               udelay(1);
+               value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
+               value &= SPCv_MSGU_CFG_TABLE_UPDATE;
+       } while ((value != 0) && (--max_wait_count));
+
+       if (!max_wait_count)
+               return -1;
+       /* check the MPI-State for initialization for up to 100 ms */
+       max_wait_count = 100 * 1000;/* 100 msec */
+       do {
+               udelay(1);
+               gst_len_mpistate =
+                       pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
+                                       GST_GSTLEN_MPIS_OFFSET);
+       } while ((GST_MPI_STATE_INIT !=
+               (gst_len_mpistate & GST_MPI_STATE_MASK)) && (--max_wait_count));
+       if (!max_wait_count)
+               return -1;
+
+       /* check MPI Initialization error */
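+       /* The upper 16 bits of GST_GSTLEN_MPIS carry the MPI error code;
+        * a non-zero value means initialization failed.
+        */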
+       gst_len_mpistate = gst_len_mpistate >> 16;
+       if (0x0000 != gst_len_mpistate)
+               return -1;
+
+       return 0;
+}
+
+/**
+ * check_fw_ready - check whether the FW is ready; if not, return an error.
+ * @pm8001_ha: our hba card information
+ */
+static int check_fw_ready(struct pm8001_hba_info *pm8001_ha)
+{
+       u32 value;
+       u32 max_wait_count;
+       u32 max_wait_time;
+       int ret = 0;
+
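+       /* Poll MSGU_SCRATCH_PAD_1 for the firmware bring-up sequence:
+        * PCIe ready, then ILA, RAAE, IOP0 and, on 16 port controllers,
+        * IOP1.
+        */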
+       /* reset / PCIe ready */
+       max_wait_time = max_wait_count = 100 * 1000;    /* 100 milli sec */
+       do {
+               udelay(1);
+               value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+       } while ((value == 0xFFFFFFFF) && (--max_wait_count));
+
+       /* check ila status */
+       max_wait_time = max_wait_count = 1000 * 1000;   /* 1000 milli sec */
+       do {
+               udelay(1);
+               value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+       } while (((value & SCRATCH_PAD_ILA_READY) !=
+                       SCRATCH_PAD_ILA_READY) && (--max_wait_count));
+       if (!max_wait_count)
+               ret = -1;
+       else {
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" ila ready status in %d millisec\n",
+                               (max_wait_time - max_wait_count)));
+       }
+
+       /* check RAAE status */
+       max_wait_time = max_wait_count = 1800 * 1000;   /* 1800 milli sec */
+       do {
+               udelay(1);
+               value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+       } while (((value & SCRATCH_PAD_RAAE_READY) !=
+                               SCRATCH_PAD_RAAE_READY) && (--max_wait_count));
+       if (!max_wait_count)
+               ret = -1;
+       else {
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" raae ready status in %d millisec\n",
+                                       (max_wait_time - max_wait_count)));
+       }
+
+       /* check iop0 status */
+       max_wait_time = max_wait_count = 600 * 1000;    /* 600 milli sec */
+       do {
+               udelay(1);
+               value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+       } while (((value & SCRATCH_PAD_IOP0_READY) != SCRATCH_PAD_IOP0_READY) &&
+                       (--max_wait_count));
+       if (!max_wait_count)
+               ret = -1;
+       else {
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" iop0 ready status in %d millisec\n",
+                               (max_wait_time - max_wait_count)));
+       }
+
+       /* check iop1 status only for 16 port controllers */
+       if ((pm8001_ha->chip_id != chip_8008) &&
+                       (pm8001_ha->chip_id != chip_8009)) {
+               /* 200 milli sec */
+               max_wait_time = max_wait_count = 200 * 1000;
+               do {
+                       udelay(1);
+                       value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+               } while (((value & SCRATCH_PAD_IOP1_READY) !=
+                               SCRATCH_PAD_IOP1_READY) && (--max_wait_count));
+               if (!max_wait_count)
+                       ret = -1;
+               else {
+                       PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                               "iop1 ready status in %d millisec\n",
+                               (max_wait_time - max_wait_count)));
+               }
+       }
+
+       return ret;
+}
+
+static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
+{
+       void __iomem *base_addr;
+       u32     value;
+       u32     offset;
+       u32     pcibar;
+       u32     pcilogic;
+
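+       /* Scratch pad 0 encodes where the main configuration table lives:
+        * the upper 6 bits select the PCI BAR and the lower 26 bits give
+        * the byte offset within that BAR, as the masks below extract.
+        */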
+       value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0);
+       offset = value & 0x03FFFFFF; /* scratch pad 0 TBL address */
+
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("Scratchpad 0 Offset: 0x%x value 0x%x\n",
+                               offset, value));
+       pcilogic = (value & 0xFC000000) >> 26;
+       pcibar = get_pci_bar_index(pcilogic);
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("Scratchpad 0 PCI BAR: %d\n", pcibar));
+       pm8001_ha->main_cfg_tbl_addr = base_addr =
+               pm8001_ha->io_mem[pcibar].memvirtaddr + offset;
+       pm8001_ha->general_stat_tbl_addr =
+               base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x18) &
+                                       0xFFFFFF);
+       pm8001_ha->inbnd_q_tbl_addr =
+               base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C) &
+                                       0xFFFFFF);
+       pm8001_ha->outbnd_q_tbl_addr =
+               base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x20) &
+                                       0xFFFFFF);
+       pm8001_ha->ivt_tbl_addr =
+               base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x8C) &
+                                       0xFFFFFF);
+       pm8001_ha->pspa_q_tbl_addr =
+               base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x90) &
+                                       0xFFFFFF);
+
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("GST OFFSET 0x%x\n",
+                       pm8001_cr32(pm8001_ha, pcibar, offset + 0x18)));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("INBND OFFSET 0x%x\n",
+                       pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C)));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("OBND OFFSET 0x%x\n",
+                       pm8001_cr32(pm8001_ha, pcibar, offset + 0x20)));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("IVT OFFSET 0x%x\n",
+                       pm8001_cr32(pm8001_ha, pcibar, offset + 0x8C)));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("PSPA OFFSET 0x%x\n",
+                       pm8001_cr32(pm8001_ha, pcibar, offset + 0x90)));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("addr - main cfg %p general status %p\n",
+                       pm8001_ha->main_cfg_tbl_addr,
+                       pm8001_ha->general_stat_tbl_addr));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("addr - inbnd %p obnd %p\n",
+                       pm8001_ha->inbnd_q_tbl_addr,
+                       pm8001_ha->outbnd_q_tbl_addr));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("addr - pspa %p ivt %p\n",
+                       pm8001_ha->pspa_q_tbl_addr,
+                       pm8001_ha->ivt_tbl_addr));
+}
+
+/**
+ * pm80xx_set_thermal_config - send the thermal configuration page to the controller.
+ * @pm8001_ha: our hba card information.
+ */
+int
+pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha)
+{
+       struct set_ctrl_cfg_req payload;
+       struct inbound_queue_table *circularQ;
+       int rc;
+       u32 tag;
+       u32 opc = OPC_INB_SET_CONTROLLER_CONFIG;
+
+       memset(&payload, 0, sizeof(struct set_ctrl_cfg_req));
+       rc = pm8001_tag_alloc(pm8001_ha, &tag);
+       if (rc)
+               return -1;
+
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+       payload.tag = cpu_to_le32(tag);
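+       /* Going by the macro names: page word 0 carries the thermal page
+        * opcode in the low bits with the enable and log-enable flags in
+        * bits 8/9, and page word 1 carries the local/remote high
+        * temperature thresholds.
+        */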
+       payload.cfg_pg[0] = (THERMAL_LOG_ENABLE << 9) |
+                       (THERMAL_ENABLE << 8) | THERMAL_OP_CODE;
+       payload.cfg_pg[1] = (LTEMPHIL << 24) | (RTEMPHIL << 8);
+
+       rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+       return rc;
+
+}
+
+/**
+ * pm80xx_set_sas_protocol_timer_config - send the SAS Protocol Timer
+ * configuration page to the controller.
+ * @pm8001_ha: our hba card information.
+ */
+static int
+pm80xx_set_sas_protocol_timer_config(struct pm8001_hba_info *pm8001_ha)
+{
+       struct set_ctrl_cfg_req payload;
+       struct inbound_queue_table *circularQ;
+       SASProtocolTimerConfig_t SASConfigPage;
+       int rc;
+       u32 tag;
+       u32 opc = OPC_INB_SET_CONTROLLER_CONFIG;
+
+       memset(&payload, 0, sizeof(struct set_ctrl_cfg_req));
+       memset(&SASConfigPage, 0, sizeof(SASProtocolTimerConfig_t));
+
+       rc = pm8001_tag_alloc(pm8001_ha, &tag);
+
+       if (rc)
+               return -1;
+
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+       payload.tag = cpu_to_le32(tag);
+
+       SASConfigPage.pageCode        =  SAS_PROTOCOL_TIMER_CONFIG_PAGE;
+       SASConfigPage.MST_MSI         =  3 << 15;
+       SASConfigPage.STP_SSP_MCT_TMO =  (STP_MCT_TMO << 16) | SSP_MCT_TMO;
+       SASConfigPage.STP_FRM_TMO     = (SAS_MAX_OPEN_TIME << 24) |
+                               (SMP_MAX_CONN_TIMER << 16) | STP_FRM_TIMER;
+       SASConfigPage.STP_IDLE_TMO    =  STP_IDLE_TIME;
+
+       if (SASConfigPage.STP_IDLE_TMO > 0x3FFFFFF)
+               SASConfigPage.STP_IDLE_TMO = 0x3FFFFFF;
+
+
+       SASConfigPage.OPNRJT_RTRY_INTVL =         (SAS_MFD << 16) |
+                                               SAS_OPNRJT_RTRY_INTVL;
+       SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO =  (SAS_DOPNRJT_RTRY_TMO << 16)
+                                               | SAS_COPNRJT_RTRY_TMO;
+       SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR =  (SAS_DOPNRJT_RTRY_THR << 16)
+                                               | SAS_COPNRJT_RTRY_THR;
+       SASConfigPage.MAX_AIP =  SAS_MAX_AIP;
+
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("SASConfigPage.pageCode "
+                       "0x%08x\n", SASConfigPage.pageCode));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("SASConfigPage.MST_MSI "
+                       " 0x%08x\n", SASConfigPage.MST_MSI));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("SASConfigPage.STP_SSP_MCT_TMO "
+                       " 0x%08x\n", SASConfigPage.STP_SSP_MCT_TMO));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("SASConfigPage.STP_FRM_TMO "
+                       " 0x%08x\n", SASConfigPage.STP_FRM_TMO));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("SASConfigPage.STP_IDLE_TMO "
+                       " 0x%08x\n", SASConfigPage.STP_IDLE_TMO));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("SASConfigPage.OPNRJT_RTRY_INTVL "
+                       " 0x%08x\n", SASConfigPage.OPNRJT_RTRY_INTVL));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO "
+                       " 0x%08x\n", SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO));
+       PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR "
+                       " 0x%08x\n", SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR));
+       PM8001_INIT_DBG(pm8001_ha, pm8001_printk("SASConfigPage.MAX_AIP "
+                       " 0x%08x\n", SASConfigPage.MAX_AIP));
+
+       memcpy(&payload.cfg_pg, &SASConfigPage,
+                        sizeof(SASProtocolTimerConfig_t));
+
+       rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+
+       return rc;
+}
+
+/**
+ * pm80xx_get_encrypt_info - Check for encryption
+ * @pm8001_ha: our hba card information.
+ */
+static int
+pm80xx_get_encrypt_info(struct pm8001_hba_info *pm8001_ha)
+{
+       u32 scratch3_value;
+       int ret;
+
+       /* Read encryption status from SCRATCH PAD 3 */
+       scratch3_value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3);
+
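+       /* Scratch pad 3, as decoded below, reports the encryption state
+        * (ready / disabled / enable or disable error), the XTS cipher
+        * flag, the security mode (SMF/SMA/SMB) and, on error, an error
+        * code in bits 31:16.
+        */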
+       if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) ==
+                                       SCRATCH_PAD3_ENC_READY) {
+               if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED)
+                       pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS;
+               if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+                                               SCRATCH_PAD3_SMF_ENABLED)
+                       pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF;
+               if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+                                               SCRATCH_PAD3_SMA_ENABLED)
+                       pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA;
+               if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+                                               SCRATCH_PAD3_SMB_ENABLED)
+                       pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB;
+               pm8001_ha->encrypt_info.status = 0;
+               PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+                       "Encryption: SCRATCH_PAD3_ENC_READY 0x%08X."
+                       "Cipher mode 0x%x Sec mode 0x%x status 0x%x\n",
+                       scratch3_value, pm8001_ha->encrypt_info.cipher_mode,
+                       pm8001_ha->encrypt_info.sec_mode,
+                       pm8001_ha->encrypt_info.status));
+               ret = 0;
+       } else if ((scratch3_value & SCRATCH_PAD3_ENC_READY) ==
+                                       SCRATCH_PAD3_ENC_DISABLED) {
+               PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+                       "Encryption: SCRATCH_PAD3_ENC_DISABLED 0x%08X\n",
+                       scratch3_value));
+               pm8001_ha->encrypt_info.status = 0xFFFFFFFF;
+               pm8001_ha->encrypt_info.cipher_mode = 0;
+               pm8001_ha->encrypt_info.sec_mode = 0;
+               return 0;
+       } else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) ==
+                               SCRATCH_PAD3_ENC_DIS_ERR) {
+               pm8001_ha->encrypt_info.status =
+                       (scratch3_value & SCRATCH_PAD3_ERR_CODE) >> 16;
+               if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED)
+                       pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS;
+               if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+                                       SCRATCH_PAD3_SMF_ENABLED)
+                       pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF;
+               if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+                                       SCRATCH_PAD3_SMA_ENABLED)
+                       pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA;
+               if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+                                       SCRATCH_PAD3_SMB_ENABLED)
+                       pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB;
+               PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+                       "Encryption: SCRATCH_PAD3_DIS_ERR 0x%08X."
+                       "Cipher mode 0x%x sec mode 0x%x status 0x%x\n",
+                       scratch3_value, pm8001_ha->encrypt_info.cipher_mode,
+                       pm8001_ha->encrypt_info.sec_mode,
+                       pm8001_ha->encrypt_info.status));
+               ret = -1;
+       } else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) ==
+                                SCRATCH_PAD3_ENC_ENA_ERR) {
+
+               pm8001_ha->encrypt_info.status =
+                       (scratch3_value & SCRATCH_PAD3_ERR_CODE) >> 16;
+               if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED)
+                       pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS;
+               if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+                                       SCRATCH_PAD3_SMF_ENABLED)
+                       pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF;
+               if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+                                       SCRATCH_PAD3_SMA_ENABLED)
+                       pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA;
+               if ((scratch3_value & SCRATCH_PAD3_SM_MASK) ==
+                                       SCRATCH_PAD3_SMB_ENABLED)
+                       pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB;
+
+               PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+                       "Encryption: SCRATCH_PAD3_ENA_ERR 0x%08X."
+                       "Cipher mode 0x%x sec mode 0x%x status 0x%x\n",
+                       scratch3_value, pm8001_ha->encrypt_info.cipher_mode,
+                       pm8001_ha->encrypt_info.sec_mode,
+                       pm8001_ha->encrypt_info.status));
+               ret = -1;
+       }
+       return ret;
+}
+
+/**
+ * pm80xx_encrypt_update - update flash with encryption information
+ * @pm8001_ha: our hba card information.
+ */
+static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha)
+{
+       struct kek_mgmt_req payload;
+       struct inbound_queue_table *circularQ;
+       int rc;
+       u32 tag;
+       u32 opc = OPC_INB_KEK_MANAGEMENT;
+
+       memset(&payload, 0, sizeof(struct kek_mgmt_req));
+       rc = pm8001_tag_alloc(pm8001_ha, &tag);
+       if (rc)
+               return -1;
+
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+       payload.tag = cpu_to_le32(tag);
+       /* Currently only one key is used. New KEK index is 1.
+        * Current KEK index is 1. Store KEK to NVRAM is 1.
+        */
+       payload.new_curidx_ksop = ((1 << 24) | (1 << 16) | (1 << 8) |
+                                       KEK_MGMT_SUBOP_KEYCARDUPDATE);
+
+       rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+
+       return rc;
+}
+
+/**
+ * pm80xx_chip_init - the main init function that initializes the whole PM80xx chip.
+ * @pm8001_ha: our hba card information
+ */
+static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
+{
+       int ret;
+       u8 i = 0;
+
+       /* check the firmware status */
+       if (-1 == check_fw_ready(pm8001_ha)) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("Firmware is not ready!\n"));
+               return -EBUSY;
+       }
+
+       /* Initialize pci space address eg: mpi offset */
+       init_pci_device_addresses(pm8001_ha);
+       init_default_table_values(pm8001_ha);
+       read_main_config_table(pm8001_ha);
+       read_general_status_table(pm8001_ha);
+       read_inbnd_queue_table(pm8001_ha);
+       read_outbnd_queue_table(pm8001_ha);
+       read_phy_attr_table(pm8001_ha);
+
+       /* update main config table, inbound table and outbound table */
+       update_main_config_table(pm8001_ha);
+       for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++)
+               update_inbnd_queue_table(pm8001_ha, i);
+       for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++)
+               update_outbnd_queue_table(pm8001_ha, i);
+
+       /* notify firmware update finished and check initialization status */
+       if (0 == mpi_init_check(pm8001_ha)) {
+               PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("MPI initialize successful!\n"));
+       } else
+               return -EBUSY;
+
+       /* send SAS protocol timer configuration page to FW */
+       ret = pm80xx_set_sas_protocol_timer_config(pm8001_ha);
+
+       /* Check for encryption */
+       if (pm8001_ha->chip->encrypt) {
+               PM8001_INIT_DBG(pm8001_ha,
+                       pm8001_printk("Checking for encryption\n"));
+               ret = pm80xx_get_encrypt_info(pm8001_ha);
+               if (ret == -1) {
+                       PM8001_INIT_DBG(pm8001_ha,
+                               pm8001_printk("Encryption error !!\n"));
+                       if (pm8001_ha->encrypt_info.status == 0x81) {
+                               PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+                                       "Encryption enabled with error."
+                                       "Saving encryption key to flash\n"));
+                               pm80xx_encrypt_update(pm8001_ha);
+                       }
+               }
+       }
+       return 0;
+}
+
+static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
+{
+       u32 max_wait_count;
+       u32 value;
+       u32 gst_len_mpistate;
+       init_pci_device_addresses(pm8001_ha);
+       /* Write bit1=1 to the Inbound DoorBell Register to tell the SPC FW
+        * that the table is to be stopped.
+        */
+       pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_RESET);
+
+       /* wait until Inbound DoorBell Clear Register toggled */
+       max_wait_count = 2 * 1000 * 1000;       /* 2 sec for spcv/ve */
+       do {
+               udelay(1);
+               value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
+               value &= SPCv_MSGU_CFG_TABLE_RESET;
+       } while ((value != 0) && (--max_wait_count));
+
+       if (!max_wait_count) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("TIMEOUT:IBDB value/=%x\n", value));
+               return -1;
+       }
+
+       /* check the MPI-State for termination in progress */
+       /* wait until Inbound DoorBell Clear Register toggled */
+       max_wait_count = 2 * 1000 * 1000;       /* 2 sec for spcv/ve */
+       do {
+               udelay(1);
+               gst_len_mpistate =
+                       pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
+                       GST_GSTLEN_MPIS_OFFSET);
+               if (GST_MPI_STATE_UNINIT ==
+                       (gst_len_mpistate & GST_MPI_STATE_MASK))
+                       break;
+       } while (--max_wait_count);
+       if (!max_wait_count) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk(" TIME OUT MPI State = 0x%x\n",
+                               gst_len_mpistate & GST_MPI_STATE_MASK));
+               return -1;
+       }
+
+       return 0;
+}
+
+/**
+ * pm80xx_chip_soft_rst - soft reset the PM80xx chip, so that all the FW
+ * register status is cleared back to its original state.
+ * @pm8001_ha: our hba card information
+ */
+
+static int
+pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
+{
+       u32 regval;
+       u32 bootloader_state;
+
+       /* Check if MPI is in ready state to reset */
+       if (mpi_uninit_check(pm8001_ha) != 0) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("MPI state is not ready\n"));
+               return -1;
+       }
+
+       /* check for reset register normal state; 0x0 */
+       regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET);
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("reset register before write : 0x%x\n", regval));
+
+       pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, SPCv_NORMAL_RESET_VALUE);
+       mdelay(500);
+
+       regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET);
+       PM8001_INIT_DBG(pm8001_ha,
+       pm8001_printk("reset register after write 0x%x\n", regval));
+
+       if ((regval & SPCv_SOFT_RESET_READ_MASK) ==
+                       SPCv_SOFT_RESET_NORMAL_RESET_OCCURED) {
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" soft reset successful [regval: 0x%x]\n",
+                                       regval));
+       } else {
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" soft reset failed [regval: 0x%x]\n",
+                                       regval));
+
+               /* check whether the bootloader executed successfully or is
+                * in HDA mode */
+               bootloader_state =
+                       pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1) &
+                       SCRATCH_PAD1_BOOTSTATE_MASK;
+
+               if (bootloader_state == SCRATCH_PAD1_BOOTSTATE_HDA_SEEPROM) {
+                       PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                               "Bootloader state - HDA mode SEEPROM\n"));
+               } else if (bootloader_state ==
+                               SCRATCH_PAD1_BOOTSTATE_HDA_BOOTSTRAP) {
+                       PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                               "Bootloader state - HDA mode Bootstrap Pin\n"));
+               } else if (bootloader_state ==
+                               SCRATCH_PAD1_BOOTSTATE_HDA_SOFTRESET) {
+                       PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                               "Bootloader state - HDA mode soft reset\n"));
+               } else if (bootloader_state ==
+                                       SCRATCH_PAD1_BOOTSTATE_CRIT_ERROR) {
+                       PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                               "Bootloader state-HDA mode critical error\n"));
+               }
+               return -EBUSY;
+       }
+
+       /* check the firmware status after reset */
+       if (-1 == check_fw_ready(pm8001_ha)) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("Firmware is not ready!\n"));
+               return -EBUSY;
+       }
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("SPCv soft reset Complete\n"));
+       return 0;
+}
+
+static void pm80xx_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
+{
+       u32 i;
+
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("chip reset start\n"));
+
+       /* do SPCv chip reset. */
+       pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, 0x11);
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("SPC soft reset Complete\n"));
+
+       /* Check whether this delay is required or not */
+       /* delay 10 usec */
+       udelay(10);
+
+       /* wait for 20 msec until the firmware gets reloaded */
+       i = 20;
+       do {
+               mdelay(1);
+       } while ((--i) != 0);
+
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("chip reset finished\n"));
+}
+
+/**
+ * pm80xx_chip_intx_interrupt_enable - enable PM80xx chip INT-X interrupts
+ * @pm8001_ha: our hba card information
+ */
+static void
+pm80xx_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
+{
+       pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL);
+       pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL);
+}
+
+/**
+ * pm80xx_chip_intx_interrupt_disable - disable PM80xx chip INT-X interrupts
+ * @pm8001_ha: our hba card information
+ */
+static void
+pm80xx_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
+{
+       pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, ODMR_MASK_ALL);
+}
+
+/**
+ * pm80xx_chip_interrupt_enable - enable a PM80xx chip interrupt vector
+ * @pm8001_ha: our hba card information
+ * @vec: interrupt vector to enable
+ */
+static void
+pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
+{
+#ifdef PM8001_USE_MSIX
+       u32 mask;
+       mask = (u32)(1 << vec);
+
+       pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, (u32)(mask & 0xFFFFFFFF));
+       return;
+#endif
+       pm80xx_chip_intx_interrupt_enable(pm8001_ha);
+
+}
+
+/**
+ * pm80xx_chip_interrupt_disable - disable a PM80xx chip interrupt vector
+ * @pm8001_ha: our hba card information
+ * @vec: interrupt vector to disable (0xFF disables all vectors)
+ */
+static void
+pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
+{
+#ifdef PM8001_USE_MSIX
+       u32 mask;
+       if (vec == 0xFF)
+               mask = 0xFFFFFFFF;
+       else
+               mask = (u32)(1 << vec);
+       pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, (u32)(mask & 0xFFFFFFFF));
+       return;
+#endif
+       pm80xx_chip_intx_interrupt_disable(pm8001_ha);
+}
+
+static void pm80xx_send_abort_all(struct pm8001_hba_info *pm8001_ha,
+               struct pm8001_device *pm8001_ha_dev)
+{
+       int res;
+       u32 ccb_tag;
+       struct pm8001_ccb_info *ccb;
+       struct sas_task *task = NULL;
+       struct task_abort_req task_abort;
+       struct inbound_queue_table *circularQ;
+       u32 opc = OPC_INB_SATA_ABORT;
+       int ret;
+
+       if (!pm8001_ha_dev) {
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("dev is null\n"));
+               return;
+       }
+
+       task = sas_alloc_slow_task(GFP_ATOMIC);
+
+       if (!task) {
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("cannot "
+                                               "allocate task\n"));
+               return;
+       }
+
+       task->task_done = pm8001_task_done;
+
+       res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
+       if (res) {
+               sas_free_task(task);
+               return;
+       }
+
+       ccb = &pm8001_ha->ccb_info[ccb_tag];
+       ccb->device = pm8001_ha_dev;
+       ccb->ccb_tag = ccb_tag;
+       ccb->task = task;
+
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+       memset(&task_abort, 0, sizeof(task_abort));
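+       /* abort_all = 1 asks the firmware to abort every outstanding
+        * command for this device rather than a single tagged request.
+        */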
+       task_abort.abort_all = cpu_to_le32(1);
+       task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
+       task_abort.tag = cpu_to_le32(ccb_tag);
+
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
+
+}
+
+static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha,
+               struct pm8001_device *pm8001_ha_dev)
+{
+       struct sata_start_req sata_cmd;
+       int res;
+       u32 ccb_tag;
+       struct pm8001_ccb_info *ccb;
+       struct sas_task *task = NULL;
+       struct host_to_dev_fis fis;
+       struct domain_device *dev;
+       struct inbound_queue_table *circularQ;
+       u32 opc = OPC_INB_SATA_HOST_OPSTART;
+
+       task = sas_alloc_slow_task(GFP_ATOMIC);
+
+       if (!task) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("cannot allocate task !!!\n"));
+               return;
+       }
+       task->task_done = pm8001_task_done;
+
+       res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
+       if (res) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("cannot allocate tag !!!\n"));
+               sas_free_task(task);
+               return;
+       }
+
+       /* allocate the domain device ourselves, as libsas
+        * is not going to provide one
+        */
+       dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC);
+       if (!dev) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("Domain device cannot be allocated\n"));
+               sas_free_task(task);
+               return;
+       } else {
+               task->dev = dev;
+               task->dev->lldd_dev = pm8001_ha_dev;
+       }
+
+       ccb = &pm8001_ha->ccb_info[ccb_tag];
+       ccb->device = pm8001_ha_dev;
+       ccb->ccb_tag = ccb_tag;
+       ccb->task = task;
+       pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG;
+       pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG;
+
+       memset(&sata_cmd, 0, sizeof(sata_cmd));
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+       /* construct read log FIS */
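+       /* Register host-to-device FIS (type 0x27) with the command bit
+        * set, issuing READ LOG EXT for log page 0x10 (the NCQ command
+        * error log), one sector.
+        */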
+       memset(&fis, 0, sizeof(struct host_to_dev_fis));
+       fis.fis_type = 0x27;
+       fis.flags = 0x80;
+       fis.command = ATA_CMD_READ_LOG_EXT;
+       fis.lbal = 0x10;
+       fis.sector_count = 0x1;
+
+       sata_cmd.tag = cpu_to_le32(ccb_tag);
+       sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
+       sata_cmd.ncqtag_atap_dir_m_dad |= ((0x1 << 7) | (0x5 << 9));
+       memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
+
+       res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
+
+}
+
+/**
+ * mpi_ssp_completion - process the FW response event for an SSP request.
+ * @pm8001_ha: our hba card information
+ * @piomb: the message contents of this outbound message.
+ *
+ * When the FW has completed an SSP request (for example an IO request) and
+ * has filled the SG buffers with data, it triggers this event to signal
+ * that it has finished the job; the corresponding buffer should then be
+ * checked. We notify the caller, which may be waiting on the result, so
+ * that the upper layer is told the task has finished.
+ */
+static void
+mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct sas_task *t;
+       struct pm8001_ccb_info *ccb;
+       unsigned long flags;
+       u32 status;
+       u32 param;
+       u32 tag;
+       struct ssp_completion_resp *psspPayload;
+       struct task_status_struct *ts;
+       struct ssp_response_iu *iu;
+       struct pm8001_device *pm8001_dev;
+       psspPayload = (struct ssp_completion_resp *)(piomb + 4);
+       status = le32_to_cpu(psspPayload->status);
+       tag = le32_to_cpu(psspPayload->tag);
+       ccb = &pm8001_ha->ccb_info[tag];
+       if ((status == IO_ABORTED) && ccb->open_retry) {
+               /* Being completed by another */
+               ccb->open_retry = 0;
+               return;
+       }
+       pm8001_dev = ccb->device;
+       param = le32_to_cpu(psspPayload->param);
+       t = ccb->task;
+
+       if (status && status != IO_UNDERFLOW)
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("sas IO status 0x%x\n", status));
+       if (unlikely(!t || !t->lldd_task || !t->dev))
+               return;
+       ts = &t->task_status;
+       switch (status) {
+       case IO_SUCCESS:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_SUCCESS ,param = 0x%x\n",
+                               param));
+               if (param == 0) {
+                       ts->resp = SAS_TASK_COMPLETE;
+                       ts->stat = SAM_STAT_GOOD;
+               } else {
+                       ts->resp = SAS_TASK_COMPLETE;
+                       ts->stat = SAS_PROTO_RESPONSE;
+                       ts->residual = param;
+                       iu = &psspPayload->ssp_resp_iu;
+                       sas_ssp_task_response(pm8001_ha->dev, t, iu);
+               }
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
+               break;
+       case IO_ABORTED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_ABORTED IOMB Tag\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_ABORTED_TASK;
+               break;
+       case IO_UNDERFLOW:
+               /* SSP Completion with error */
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_UNDERFLOW ,param = 0x%x\n",
+                               param));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_UNDERRUN;
+               ts->residual = param;
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
+               break;
+       case IO_NO_DEVICE:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_NO_DEVICE\n"));
+               ts->resp = SAS_TASK_UNDELIVERED;
+               ts->stat = SAS_PHY_DOWN;
+               break;
+       case IO_XFER_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               /* Force the midlayer to retry */
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_XFER_ERROR_PHY_NOT_READY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha,
+               pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_EPROTO;
+               break;
+       case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+               break;
+       case IO_OPEN_CNX_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+       case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+               if (!t->uldd_task)
+                       pm8001_handle_event(pm8001_ha,
+                               pm8001_dev,
+                               IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+               break;
+       case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+               break;
+       case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+               break;
+       case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+               ts->resp = SAS_TASK_UNDELIVERED;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+               break;
+       case IO_XFER_ERROR_NAK_RECEIVED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_NAK_R_ERR;
+               break;
+       case IO_XFER_ERROR_DMA:
+               PM8001_IO_DBG(pm8001_ha,
+               pm8001_printk("IO_XFER_ERROR_DMA\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               break;
+       case IO_XFER_OPEN_RETRY_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_XFER_ERROR_OFFSET_MISMATCH:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               break;
+       case IO_PORT_IN_RESET:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_PORT_IN_RESET\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               break;
+       case IO_DS_NON_OPERATIONAL:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               if (!t->uldd_task)
+                       pm8001_handle_event(pm8001_ha,
+                               pm8001_dev,
+                               IO_DS_NON_OPERATIONAL);
+               break;
+       case IO_DS_IN_RECOVERY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_DS_IN_RECOVERY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               break;
+       case IO_TM_TAG_NOT_FOUND:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_TM_TAG_NOT_FOUND\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               break;
+       case IO_SSP_EXT_IU_ZERO_LEN_ERROR:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_SSP_EXT_IU_ZERO_LEN_ERROR\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               break;
+       case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       default:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("Unknown status 0x%x\n", status));
+               /* not allowed case. Therefore, return failed status */
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               break;
+       }
+       PM8001_IO_DBG(pm8001_ha,
+               pm8001_printk("scsi_status = 0x%x\n ",
+               psspPayload->ssp_resp_iu.status));
+       spin_lock_irqsave(&t->task_state_lock, flags);
+       t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+       t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+       t->task_state_flags |= SAS_TASK_STATE_DONE;
+       if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+                       "task 0x%p done with io_status 0x%x resp 0x%x "
+                       "stat 0x%x but aborted by upper layer!\n",
+                       t, status, ts->resp, ts->stat));
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+       } else {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+               mb();/* in order to force CPU ordering */
+               t->task_done(t);
+       }
+}
+
+/* See the comments for mpi_ssp_completion */
+static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct sas_task *t;
+       unsigned long flags;
+       struct task_status_struct *ts;
+       struct pm8001_ccb_info *ccb;
+       struct pm8001_device *pm8001_dev;
+       struct ssp_event_resp *psspPayload =
+               (struct ssp_event_resp *)(piomb + 4);
+       u32 event = le32_to_cpu(psspPayload->event);
+       u32 tag = le32_to_cpu(psspPayload->tag);
+       u32 port_id = le32_to_cpu(psspPayload->port_id);
+
+       ccb = &pm8001_ha->ccb_info[tag];
+       t = ccb->task;
+       pm8001_dev = ccb->device;
+       if (event)
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("sas IO status 0x%x\n", event));
+       if (unlikely(!t || !t->lldd_task || !t->dev))
+               return;
+       ts = &t->task_status;
+       PM8001_IO_DBG(pm8001_ha,
+               pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n",
+                               port_id, tag, event));
+       switch (event) {
+       case IO_OVERFLOW:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n");)
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               ts->residual = 0;
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
+               break;
+       case IO_XFER_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+               pm8001_handle_event(pm8001_ha, t, IO_XFER_ERROR_BREAK);
+               return;
+       case IO_XFER_ERROR_PHY_NOT_READY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_EPROTO;
+               break;
+       case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+               break;
+       case IO_OPEN_CNX_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+       case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+               if (!t->uldd_task)
+                       pm8001_handle_event(pm8001_ha,
+                               pm8001_dev,
+                               IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+               break;
+       case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+               break;
+       case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+               break;
+       case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+               break;
+       case IO_XFER_ERROR_NAK_RECEIVED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_NAK_R_ERR;
+               break;
+       case IO_XFER_OPEN_RETRY_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+               pm8001_handle_event(pm8001_ha, t, IO_XFER_OPEN_RETRY_TIMEOUT);
+               return;
+       case IO_XFER_ERROR_UNEXPECTED_PHASE:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               break;
+       case IO_XFER_ERROR_XFER_RDY_OVERRUN:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               break;
+       case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               break;
+       case IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+               pm8001_printk("IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               break;
+       case IO_XFER_ERROR_OFFSET_MISMATCH:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               break;
+       case IO_XFER_ERROR_XFER_ZERO_DATA_LEN:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               break;
+       case IO_XFER_ERROR_INTERNAL_CRC_ERROR:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFR_ERROR_INTERNAL_CRC_ERROR\n"));
+               /* TBC: used default set values */
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               break;
+       case IO_XFER_CMD_FRAME_ISSUED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n"));
+               return;
+       default:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("Unknown event 0x%x\n", event));
+               /* not allowed case. Therefore, return failed status */
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               break;
+       }
+       spin_lock_irqsave(&t->task_state_lock, flags);
+       t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+       t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+       t->task_state_flags |= SAS_TASK_STATE_DONE;
+       if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+                       "task 0x%p done with event 0x%x resp 0x%x "
+                       "stat 0x%x but aborted by upper layer!\n",
+                       t, event, ts->resp, ts->stat));
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+       } else {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+               mb();/* in order to force CPU ordering */
+               t->task_done(t);
+       }
+}
+
+/* See the comments for mpi_ssp_completion */
+static void
+mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct sas_task *t;
+       struct pm8001_ccb_info *ccb;
+       u32 param;
+       u32 status;
+       u32 tag;
+       struct sata_completion_resp *psataPayload;
+       struct task_status_struct *ts;
+       struct ata_task_resp *resp;
+       u32 *sata_resp;
+       struct pm8001_device *pm8001_dev;
+       unsigned long flags;
+
+       psataPayload = (struct sata_completion_resp *)(piomb + 4);
+       status = le32_to_cpu(psataPayload->status);
+       tag = le32_to_cpu(psataPayload->tag);
+
+       if (!tag) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("tag null\n"));
+               return;
+       }
+       ccb = &pm8001_ha->ccb_info[tag];
+       param = le32_to_cpu(psataPayload->param);
+       if (ccb) {
+               t = ccb->task;
+               pm8001_dev = ccb->device;
+       } else {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("ccb null\n"));
+               return;
+       }
+
+       if (t) {
+               if (t->dev && (t->dev->lldd_dev))
+                       pm8001_dev = t->dev->lldd_dev;
+       } else {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("task null\n"));
+               return;
+       }
+
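+       /* The strict task/dev null checks are skipped while an internally
+        * issued READ LOG EXT (NCQ_READ_LOG_FLAG) is outstanding for this
+        * device.
+        */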
+       if ((pm8001_dev && !(pm8001_dev->id & NCQ_READ_LOG_FLAG))
+               && unlikely(!t || !t->lldd_task || !t->dev)) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("task or dev null\n"));
+               return;
+       }
+
+       ts = &t->task_status;
+       if (!ts) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("ts null\n"));
+               return;
+       }
+
+       switch (status) {
+       case IO_SUCCESS:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
+               if (param == 0) {
+                       ts->resp = SAS_TASK_COMPLETE;
+                       ts->stat = SAM_STAT_GOOD;
+                       /* check if response is for SEND READ LOG */
+                       if (pm8001_dev &&
+                               (pm8001_dev->id & NCQ_READ_LOG_FLAG)) {
+                               /* set new bit for abort_all */
+                               pm8001_dev->id |= NCQ_ABORT_ALL_FLAG;
+                               /* clear bit for read log */
+                               pm8001_dev->id = pm8001_dev->id & 0x7FFFFFFF;
+                               pm80xx_send_abort_all(pm8001_ha, pm8001_dev);
+                               /* Free the tag */
+                               pm8001_tag_free(pm8001_ha, tag);
+                               sas_free_task(t);
+                               return;
+                       }
+               } else {
+                       u8 len;
+                       ts->resp = SAS_TASK_COMPLETE;
+                       ts->stat = SAS_PROTO_RESPONSE;
+                       ts->residual = param;
+                       PM8001_IO_DBG(pm8001_ha,
+                               pm8001_printk("SAS_PROTO_RESPONSE len = %d\n",
+                               param));
+                       sata_resp = &psataPayload->sata_resp[0];
+                       resp = (struct ata_task_resp *)ts->buf;
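+                       /* Pick the expected response FIS size: a PIO setup
+                        * FIS for a non-DMA read, a set device bits FIS for
+                        * NCQ, otherwise a plain D2H register FIS.
+                        */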
+                       if (t->ata_task.dma_xfer == 0 &&
+                       t->data_dir == PCI_DMA_FROMDEVICE) {
+                               len = sizeof(struct pio_setup_fis);
+                               PM8001_IO_DBG(pm8001_ha,
+                               pm8001_printk("PIO read len = %d\n", len));
+                       } else if (t->ata_task.use_ncq) {
+                               len = sizeof(struct set_dev_bits_fis);
+                               PM8001_IO_DBG(pm8001_ha,
+                                       pm8001_printk("FPDMA len = %d\n", len));
+                       } else {
+                               len = sizeof(struct dev_to_host_fis);
+                               PM8001_IO_DBG(pm8001_ha,
+                               pm8001_printk("other len = %d\n", len));
+                       }
+                       if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
+                               resp->frame_len = len;
+                               memcpy(&resp->ending_fis[0], sata_resp, len);
+                               ts->buf_valid_size = sizeof(*resp);
+                       } else
+                               PM8001_IO_DBG(pm8001_ha,
+                                       pm8001_printk("response too large\n"));
+               }
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
+               break;
+       case IO_ABORTED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_ABORTED IOMB Tag\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_ABORTED_TASK;
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
+               break;
+               /* the following cases are TODO cases */
+       case IO_UNDERFLOW:
+               /* SATA Completion with error */
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_UNDERFLOW param = %d\n", param));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_UNDERRUN;
+               ts->residual = param;
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
+               break;
+       case IO_NO_DEVICE:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_NO_DEVICE\n"));
+               ts->resp = SAS_TASK_UNDELIVERED;
+               ts->stat = SAS_PHY_DOWN;
+               break;
+       case IO_XFER_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_INTERRUPTED;
+               break;
+       case IO_XFER_ERROR_PHY_NOT_READY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_EPROTO;
+               break;
+       case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+               break;
+       case IO_OPEN_CNX_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
+               break;
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+       case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               if (!t->uldd_task) {
+                       pm8001_handle_event(pm8001_ha,
+                               pm8001_dev,
+                               IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+                       ts->resp = SAS_TASK_UNDELIVERED;
+                       ts->stat = SAS_QUEUE_FULL;
+                       pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+                       mb();/*in order to force CPU ordering*/
+                       spin_unlock_irq(&pm8001_ha->lock);
+                       t->task_done(t);
+                       spin_lock_irq(&pm8001_ha->lock);
+                       return;
+               }
+               break;
+       case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+               ts->resp = SAS_TASK_UNDELIVERED;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+               if (!t->uldd_task) {
+                       pm8001_handle_event(pm8001_ha,
+                               pm8001_dev,
+                               IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+                       ts->resp = SAS_TASK_UNDELIVERED;
+                       ts->stat = SAS_QUEUE_FULL;
+                       pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+                       mb();/*ditto*/
+                       spin_unlock_irq(&pm8001_ha->lock);
+                       t->task_done(t);
+                       spin_lock_irq(&pm8001_ha->lock);
+                       return;
+               }
+               break;
+       case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+               break;
+       case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               if (!t->uldd_task) {
+                       pm8001_handle_event(pm8001_ha,
+                               pm8001_dev,
+                               IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY);
+                       ts->resp = SAS_TASK_UNDELIVERED;
+                       ts->stat = SAS_QUEUE_FULL;
+                       pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+                       mb();/* ditto*/
+                       spin_unlock_irq(&pm8001_ha->lock);
+                       t->task_done(t);
+                       spin_lock_irq(&pm8001_ha->lock);
+                       return;
+               }
+               break;
+       case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+               break;
+       case IO_XFER_ERROR_NAK_RECEIVED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_NAK_R_ERR;
+               break;
+       case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_NAK_R_ERR;
+               break;
+       case IO_XFER_ERROR_DMA:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_DMA\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_ABORTED_TASK;
+               break;
+       case IO_XFER_ERROR_SATA_LINK_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_SATA_LINK_TIMEOUT\n"));
+               ts->resp = SAS_TASK_UNDELIVERED;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               break;
+       case IO_XFER_ERROR_REJECTED_NCQ_MODE:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_UNDERRUN;
+               break;
+       case IO_XFER_OPEN_RETRY_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       case IO_PORT_IN_RESET:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_PORT_IN_RESET\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               break;
+       case IO_DS_NON_OPERATIONAL:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               if (!t->uldd_task) {
+                       pm8001_handle_event(pm8001_ha, pm8001_dev,
+                                       IO_DS_NON_OPERATIONAL);
+                       ts->resp = SAS_TASK_UNDELIVERED;
+                       ts->stat = SAS_QUEUE_FULL;
+                       pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+                       mb();/*ditto*/
+                       spin_unlock_irq(&pm8001_ha->lock);
+                       t->task_done(t);
+                       spin_lock_irq(&pm8001_ha->lock);
+                       return;
+               }
+               break;
+       case IO_DS_IN_RECOVERY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_DS_IN_RECOVERY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               break;
+       case IO_DS_IN_ERROR:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_DS_IN_ERROR\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               if (!t->uldd_task) {
+                       pm8001_handle_event(pm8001_ha, pm8001_dev,
+                                       IO_DS_IN_ERROR);
+                       ts->resp = SAS_TASK_UNDELIVERED;
+                       ts->stat = SAS_QUEUE_FULL;
+                       pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+                       mb();/*ditto*/
+                       spin_unlock_irq(&pm8001_ha->lock);
+                       t->task_done(t);
+                       spin_lock_irq(&pm8001_ha->lock);
+                       return;
+               }
+               break;
+       case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       default:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("Unknown status 0x%x\n", status));
+               /* not allowed case. Therefore, return failed status */
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               break;
+       }
+       spin_lock_irqsave(&t->task_state_lock, flags);
+       t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+       t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+       t->task_state_flags |= SAS_TASK_STATE_DONE;
+       if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("task 0x%p done with io_status 0x%x"
+                       " resp 0x%x stat 0x%x but aborted by upper layer!\n",
+                       t, status, ts->resp, ts->stat));
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+       } else if (t->uldd_task) {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+               mb();/* ditto */
+               spin_unlock_irq(&pm8001_ha->lock);
+               t->task_done(t);
+               spin_lock_irq(&pm8001_ha->lock);
+       } else if (!t->uldd_task) {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+               mb();/*ditto*/
+               spin_unlock_irq(&pm8001_ha->lock);
+               t->task_done(t);
+               spin_lock_irq(&pm8001_ha->lock);
+       }
+}
+
+/* See the comments for mpi_ssp_completion */
+static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct sas_task *t;
+       struct task_status_struct *ts;
+       struct pm8001_ccb_info *ccb;
+       struct pm8001_device *pm8001_dev;
+       struct sata_event_resp *psataPayload =
+               (struct sata_event_resp *)(piomb + 4);
+       u32 event = le32_to_cpu(psataPayload->event);
+       u32 tag = le32_to_cpu(psataPayload->tag);
+       u32 port_id = le32_to_cpu(psataPayload->port_id);
+       u32 dev_id = le32_to_cpu(psataPayload->device_id);
+       unsigned long flags;
+
+       ccb = &pm8001_ha->ccb_info[tag];
+
+       if (ccb) {
+               t = ccb->task;
+               pm8001_dev = ccb->device;
+       } else {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("No CCB. Returning\n"));
+               return;
+       }
+       if (event)
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("SATA EVENT 0x%x\n", event));
+
+       /* Check if this is NCQ error */
+       if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) {
+               /* find device using device id */
+               pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id);
+               /* send read log extension */
+               if (pm8001_dev)
+                       pm80xx_send_read_log(pm8001_ha, pm8001_dev);
+               return;
+       }
+
+       if (unlikely(!t || !t->lldd_task || !t->dev)) {
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("task or dev null\n"));
+               return;
+       }
+
+       ts = &t->task_status;
+       PM8001_IO_DBG(pm8001_ha,
+               pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n",
+                               port_id, tag, event));
+       switch (event) {
+       case IO_OVERFLOW:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OVERFLOW\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               ts->residual = 0;
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
+               break;
+       case IO_XFER_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_INTERRUPTED;
+               break;
+       case IO_XFER_ERROR_PHY_NOT_READY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_EPROTO;
+               break;
+       case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+               break;
+       case IO_OPEN_CNX_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
+               break;
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+       case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+               ts->resp = SAS_TASK_UNDELIVERED;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               if (!t->uldd_task) {
+                       pm8001_handle_event(pm8001_ha,
+                               pm8001_dev,
+                               IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+                       ts->resp = SAS_TASK_COMPLETE;
+                       ts->stat = SAS_QUEUE_FULL;
+                       pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+                       mb();/*ditto*/
+                       spin_unlock_irq(&pm8001_ha->lock);
+                       t->task_done(t);
+                       spin_lock_irq(&pm8001_ha->lock);
+                       return;
+               }
+               break;
+       case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+               ts->resp = SAS_TASK_UNDELIVERED;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+               break;
+       case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+               break;
+       case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+               break;
+       case IO_XFER_ERROR_NAK_RECEIVED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_NAK_R_ERR;
+               break;
+       case IO_XFER_ERROR_PEER_ABORTED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_PEER_ABORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_NAK_R_ERR;
+               break;
+       case IO_XFER_ERROR_REJECTED_NCQ_MODE:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_UNDERRUN;
+               break;
+       case IO_XFER_OPEN_RETRY_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       case IO_XFER_ERROR_UNEXPECTED_PHASE:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       case IO_XFER_ERROR_XFER_RDY_OVERRUN:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       case IO_XFER_ERROR_OFFSET_MISMATCH:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       case IO_XFER_ERROR_XFER_ZERO_DATA_LEN:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       case IO_XFER_CMD_FRAME_ISSUED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n"));
+               break;
+       case IO_XFER_PIO_SETUP_ERROR:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_PIO_SETUP_ERROR\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       case IO_XFER_ERROR_INTERNAL_CRC_ERROR:
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_INTERNAL_CRC_ERROR\n"));
+               /* TBC: used default set values */
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       case IO_XFER_DMA_ACTIVATE_TIMEOUT:
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_DMA_ACTIVATE_TIMEOUT\n"));
+               /* TBC: used default set values */
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       default:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("Unknown event 0x%x\n", event));
+               /* not allowed case. Therefore, return failed status */
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_TO;
+               break;
+       }
+       spin_lock_irqsave(&t->task_state_lock, flags);
+       t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+       t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+       t->task_state_flags |= SAS_TASK_STATE_DONE;
+       if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("task 0x%p done with io_status 0x%x"
+                       " resp 0x%x stat 0x%x but aborted by upper layer!\n",
+                       t, event, ts->resp, ts->stat));
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+       } else if (t->uldd_task) {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+               mb();/* ditto */
+               spin_unlock_irq(&pm8001_ha->lock);
+               t->task_done(t);
+               spin_lock_irq(&pm8001_ha->lock);
+       } else if (!t->uldd_task) {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+               mb();/*ditto*/
+               spin_unlock_irq(&pm8001_ha->lock);
+               t->task_done(t);
+               spin_lock_irq(&pm8001_ha->lock);
+       }
+}
+
+/* See the comments for mpi_ssp_completion */
+static void
+mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       u32 param, i;
+       struct sas_task *t;
+       struct pm8001_ccb_info *ccb;
+       unsigned long flags;
+       u32 status;
+       u32 tag;
+       struct smp_completion_resp *psmpPayload;
+       struct task_status_struct *ts;
+       struct pm8001_device *pm8001_dev;
+       char *pdma_respaddr = NULL;
+
+       psmpPayload = (struct smp_completion_resp *)(piomb + 4);
+       status = le32_to_cpu(psmpPayload->status);
+       tag = le32_to_cpu(psmpPayload->tag);
+
+       ccb = &pm8001_ha->ccb_info[tag];
+       param = le32_to_cpu(psmpPayload->param);
+       t = ccb->task;
+       ts = &t->task_status;
+       pm8001_dev = ccb->device;
+       if (status)
+               PM8001_FAIL_DBG(pm8001_ha,
+                       pm8001_printk("smp IO status 0x%x\n", status));
+       if (unlikely(!t || !t->lldd_task || !t->dev))
+               return;
+
+       switch (status) {
+
+       case IO_SUCCESS:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAM_STAT_GOOD;
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
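+               /* In direct response mode the SMP response bytes are carried
+                * in the IOMB itself; copy them into the mapped response
+                * buffer.
+                */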
+               if (pm8001_ha->smp_exp_mode == SMP_DIRECT) {
+                       PM8001_IO_DBG(pm8001_ha,
+                               pm8001_printk("DIRECT RESPONSE Length:%d\n",
+                                               param));
+                       pdma_respaddr = (char *)(phys_to_virt(cpu_to_le64
+                                               ((u64)sg_dma_address
+                                               (&t->smp_task.smp_resp))));
+                       for (i = 0; i < param; i++) {
+                               *(pdma_respaddr+i) = psmpPayload->_r_a[i];
+                               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                                       "SMP Byte%d DMA data 0x%x psmp 0x%x\n",
+                                       i, *(pdma_respaddr+i),
+                                       psmpPayload->_r_a[i]));
+                       }
+               }
+               break;
+       case IO_ABORTED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_ABORTED IOMB\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_ABORTED_TASK;
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
+               break;
+       case IO_OVERFLOW:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OVERFLOW\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DATA_OVERRUN;
+               ts->residual = 0;
+               if (pm8001_dev)
+                       pm8001_dev->running_req--;
+               break;
+       case IO_NO_DEVICE:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_NO_DEVICE\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_PHY_DOWN;
+               break;
+       case IO_ERROR_HW_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_ERROR_HW_TIMEOUT\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAM_STAT_BUSY;
+               break;
+       case IO_XFER_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_BREAK\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAM_STAT_BUSY;
+               break;
+       case IO_XFER_ERROR_PHY_NOT_READY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAM_STAT_BUSY;
+               break;
+       case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha,
+               pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+               break;
+       case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+               break;
+       case IO_OPEN_CNX_ERROR_BREAK:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
+               break;
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
+       case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE:
+       case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+               pm8001_handle_event(pm8001_ha,
+                               pm8001_dev,
+                               IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
+               break;
+       case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+               break;
+       case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+               break;
+       case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+               break;
+       case IO_XFER_ERROR_RX_FRAME:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_ERROR_RX_FRAME\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               break;
+       case IO_XFER_OPEN_RETRY_TIMEOUT:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_ERROR_INTERNAL_SMP_RESOURCE:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_ERROR_INTERNAL_SMP_RESOURCE\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_QUEUE_FULL;
+               break;
+       case IO_PORT_IN_RESET:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_PORT_IN_RESET\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_DS_NON_OPERATIONAL:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               break;
+       case IO_DS_IN_RECOVERY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_DS_IN_RECOVERY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_OPEN_REJECT;
+               ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+               break;
+       default:
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("Unknown status 0x%x\n", status));
+               ts->resp = SAS_TASK_COMPLETE;
+               ts->stat = SAS_DEV_NO_RESPONSE;
+               /* not allowed case. Therefore, return failed status */
+               break;
+       }
+       spin_lock_irqsave(&t->task_state_lock, flags);
+       t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+       t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+       t->task_state_flags |= SAS_TASK_STATE_DONE;
+       if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+                       "task 0x%p done with io_status 0x%x resp 0x%x"
+                       " stat 0x%x but aborted by upper layer!\n",
+                       t, status, ts->resp, ts->stat));
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+       } else {
+               spin_unlock_irqrestore(&t->task_state_lock, flags);
+               pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+               mb();/* in order to force CPU ordering */
+               t->task_done(t);
+       }
+}
+
+/**
+ * pm80xx_hw_event_ack_req - For PM8001, some events need to be acknowledged to the FW.
+ * @pm8001_ha: our hba card information
+ * @Qnum: the inbound queue number.
+ * @SEA: source of event to ack
+ * @port_id: port id.
+ * @phyId: phy id.
+ * @param0: parameter 0.
+ * @param1: parameter 1.
+ */
+static void pm80xx_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
+       u32 Qnum, u32 SEA, u32 port_id, u32 phyId, u32 param0, u32 param1)
+{
+       struct hw_event_ack_req  payload;
+       u32 opc = OPC_INB_SAS_HW_EVENT_ACK;
+
+       struct inbound_queue_table *circularQ;
+
+       memset((u8 *)&payload, 0, sizeof(payload));
+       circularQ = &pm8001_ha->inbnd_q_tbl[Qnum];
+       payload.tag = cpu_to_le32(1);
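+       /* phyid_sea_portid packs the event source (SEA) into bits 8..23,
+        * the phy id into bits 24..31 and the port id into bits 0..7.
+        */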
+       payload.phyid_sea_portid = cpu_to_le32(((SEA & 0xFFFF) << 8) |
+               ((phyId & 0xFF) << 24) | (port_id & 0xFF));
+       payload.param0 = cpu_to_le32(param0);
+       payload.param1 = cpu_to_le32(param1);
+       pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+}
+
+static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
+       u32 phyId, u32 phy_op);
+
+/**
+ * hw_event_sas_phy_up - FW reports a SAS phy up event.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void
+hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct hw_event_resp *pPayload =
+               (struct hw_event_resp *)(piomb + 4);
+       u32 lr_status_evt_portid =
+               le32_to_cpu(pPayload->lr_status_evt_portid);
+       u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);
+
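+       /* lr_status_evt_portid: link rate in bits 28..31, port id in bits
+        * 0..7; phyid_npip_portstate: phy id in bits 16..23, port state in
+        * bits 0..3.
+        */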
+       u8 link_rate =
+               (u8)((lr_status_evt_portid & 0xF0000000) >> 28);
+       u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
+       u8 phy_id =
+               (u8)((phyid_npip_portstate & 0xFF0000) >> 16);
+       u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F);
+
+       struct pm8001_port *port = &pm8001_ha->port[port_id];
+       struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+       struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+       unsigned long flags;
+       u8 deviceType = pPayload->sas_identify.dev_type;
+       port->port_state = portstate;
+       PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+               "portid:%d; phyid:%d; linkrate:%d; "
+               "portstate:%x; devicetype:%x\n",
+               port_id, phy_id, link_rate, portstate, deviceType));
+
+       switch (deviceType) {
+       case SAS_PHY_UNUSED:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("device type no device.\n"));
+               break;
+       case SAS_END_DEVICE:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk("end device.\n"));
+               pm80xx_chip_phy_ctl_req(pm8001_ha, phy_id,
+                       PHY_NOTIFY_ENABLE_SPINUP);
+               port->port_attached = 1;
+               pm8001_get_lrate_mode(phy, link_rate);
+               break;
+       case SAS_EDGE_EXPANDER_DEVICE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("expander device.\n"));
+               port->port_attached = 1;
+               pm8001_get_lrate_mode(phy, link_rate);
+               break;
+       case SAS_FANOUT_EXPANDER_DEVICE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("fanout expander device.\n"));
+               port->port_attached = 1;
+               pm8001_get_lrate_mode(phy, link_rate);
+               break;
+       default:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("unknown device type(%x)\n", deviceType));
+               break;
+       }
+       phy->phy_type |= PORT_TYPE_SAS;
+       phy->identify.device_type = deviceType;
+       phy->phy_attached = 1;
+       if (phy->identify.device_type == SAS_END_DEVICE)
+               phy->identify.target_port_protocols = SAS_PROTOCOL_SSP;
+       else if (phy->identify.device_type != SAS_PHY_UNUSED)
+               phy->identify.target_port_protocols = SAS_PROTOCOL_SMP;
+       phy->sas_phy.oob_mode = SAS_OOB_MODE;
+       sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+       spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
+       memcpy(phy->frame_rcvd, &pPayload->sas_identify,
+               sizeof(struct sas_identify_frame)-4);
+       phy->frame_rcvd_size = sizeof(struct sas_identify_frame) - 4;
+       pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
+       spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
+       if (pm8001_ha->flags == PM8001F_RUN_TIME)
+               mdelay(200);/*delay a moment to wait disk to spinup*/
+       pm8001_bytes_dmaed(pm8001_ha, phy_id);
+}
+
+/**
+ * hw_event_sata_phy_up - FW reports a SATA phy up event.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void
+hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct hw_event_resp *pPayload =
+               (struct hw_event_resp *)(piomb + 4);
+       u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);
+       u32 lr_status_evt_portid =
+               le32_to_cpu(pPayload->lr_status_evt_portid);
+       u8 link_rate =
+               (u8)((lr_status_evt_portid & 0xF0000000) >> 28);
+       u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
+       u8 phy_id =
+               (u8)((phyid_npip_portstate & 0xFF0000) >> 16);
+
+       u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F);
+
+       struct pm8001_port *port = &pm8001_ha->port[port_id];
+       struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+       struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+       unsigned long flags;
+       PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+               "port id %d, phy id %d link_rate %d portstate 0x%x\n",
+                               port_id, phy_id, link_rate, portstate));
+
+       port->port_state = portstate;
+       port->port_attached = 1;
+       pm8001_get_lrate_mode(phy, link_rate);
+       phy->phy_type |= PORT_TYPE_SATA;
+       phy->phy_attached = 1;
+       phy->sas_phy.oob_mode = SATA_OOB_MODE;
+       sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+       spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
+       memcpy(phy->frame_rcvd, ((u8 *)&pPayload->sata_fis - 4),
+               sizeof(struct dev_to_host_fis));
+       phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
+       phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
+       phy->identify.device_type = SAS_SATA_DEV;
+       pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
+       spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
+       pm8001_bytes_dmaed(pm8001_ha, phy_id);
+}
+
+/**
+ * hw_event_phy_down - notify libsas that a phy is down.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void
+hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct hw_event_resp *pPayload =
+               (struct hw_event_resp *)(piomb + 4);
+
+       u32 lr_status_evt_portid =
+               le32_to_cpu(pPayload->lr_status_evt_portid);
+       u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
+       u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);
+       u8 phy_id =
+               (u8)((phyid_npip_portstate & 0xFF0000) >> 16);
+       u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F);
+
+       struct pm8001_port *port = &pm8001_ha->port[port_id];
+       struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+       port->port_state = portstate;
+       phy->phy_type = 0;
+       phy->identify.device_type = 0;
+       phy->phy_attached = 0;
+       memset(&phy->dev_sas_addr, 0, SAS_ADDR_SIZE);
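+       /* Only PORT_INVALID and PORT_LOSTCOMM (last phy of the port went
+        * down) require an explicit HW_EVENT_PHY_DOWN ack to the FW below.
+        */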
+       switch (portstate) {
+       case PORT_VALID:
+               break;
+       case PORT_INVALID:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" PortInvalid portID %d\n", port_id));
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" Last phy Down and port invalid\n"));
+               port->port_attached = 0;
+               pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
+                       port_id, phy_id, 0, 0);
+               break;
+       case PORT_IN_RESET:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" Port In Reset portID %d\n", port_id));
+               break;
+       case PORT_NOT_ESTABLISHED:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" phy Down and PORT_NOT_ESTABLISHED\n"));
+               port->port_attached = 0;
+               break;
+       case PORT_LOSTCOMM:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" phy Down and PORT_LOSTCOMM\n"));
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" Last phy Down and port invalid\n"));
+               port->port_attached = 0;
+               pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
+                       port_id, phy_id, 0, 0);
+               break;
+       default:
+               port->port_attached = 0;
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" phy Down and (default) = 0x%x\n",
+                       portstate));
+               break;
+       }
+}
+
+static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct phy_start_resp *pPayload =
+               (struct phy_start_resp *)(piomb + 4);
+       u32 status =
+               le32_to_cpu(pPayload->status);
+       u32 phy_id =
+               le32_to_cpu(pPayload->phyid);
+       struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("phy start resp status:0x%x, phyid:0x%x\n",
+                               status, phy_id));
+       if (status == 0) {
+               phy->phy_state = 1;
+               if (pm8001_ha->flags == PM8001F_RUN_TIME)
+                       complete(phy->enable_completion);
+       }
+       return 0;
+}
+
+/**
+ * mpi_thermal_hw_event - handle an incoming thermal hw event.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_thermal_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct thermal_hw_event *pPayload =
+               (struct thermal_hw_event *)(piomb + 4);
+
+       u32 thermal_event = le32_to_cpu(pPayload->thermal_event);
+       u32 rht_lht = le32_to_cpu(pPayload->rht_lht);
+
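+       /* Bit 6 of thermal_event flags a local high temperature violation
+        * and bit 4 a remote one; the measured values are packed into
+        * rht_lht (local in bits 8..15, remote in bits 24..31).
+        */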
+       if (thermal_event & 0x40) {
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "Thermal Event: Local high temperature violated!\n"));
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "Thermal Event: Measured local high temperature %d\n",
+                               ((rht_lht & 0xFF00) >> 8)));
+       }
+       if (thermal_event & 0x10) {
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "Thermal Event: Remote high temperature violated!\n"));
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "Thermal Event: Measured remote high temperature %d\n",
+                               ((rht_lht & 0xFF000000) >> 24)));
+       }
+       return 0;
+}
+
+/**
+ * mpi_hw_event - handle an incoming hw event.
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       unsigned long flags;
+       struct hw_event_resp *pPayload =
+               (struct hw_event_resp *)(piomb + 4);
+       u32 lr_status_evt_portid =
+               le32_to_cpu(pPayload->lr_status_evt_portid);
+       u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);
+       u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
+       u8 phy_id =
+               (u8)((phyid_npip_portstate & 0xFF0000) >> 16);
+       u16 eventType =
+               (u16)((lr_status_evt_portid & 0x00FFFF00) >> 8);
+       u8 status =
+               (u8)((lr_status_evt_portid & 0x0F000000) >> 24);
+
+       struct sas_ha_struct *sas_ha = pm8001_ha->sas;
+       struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+       struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
+       PM8001_MSG_DBG(pm8001_ha,
+               pm8001_printk("portid:%d phyid:%d event:0x%x status:0x%x\n",
+                               port_id, phy_id, eventType, status));
+
+       switch (eventType) {
+
+       case HW_EVENT_SAS_PHY_UP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_SAS_PHY_UP\n"));
+               hw_event_sas_phy_up(pm8001_ha, piomb);
+               break;
+       case HW_EVENT_SATA_PHY_UP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_SATA_PHY_UP\n"));
+               hw_event_sata_phy_up(pm8001_ha, piomb);
+               break;
+       case HW_EVENT_SATA_SPINUP_HOLD:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_SATA_SPINUP_HOLD\n"));
+               sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD);
+               break;
+       case HW_EVENT_PHY_DOWN:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_PHY_DOWN\n"));
+               sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
+               phy->phy_attached = 0;
+               phy->phy_state = 0;
+               hw_event_phy_down(pm8001_ha, piomb);
+               break;
+       case HW_EVENT_PORT_INVALID:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_PORT_INVALID\n"));
+               sas_phy_disconnected(sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+               break;
+       /* A broadcast change primitive was received; tell libsas to
+        * revalidate the SAS domain.
+        */
+       case HW_EVENT_BROADCAST_CHANGE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_BROADCAST_CHANGE\n"));
+               pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_BROADCAST_CHANGE,
+                       port_id, phy_id, 1, 0);
+               spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+               sas_phy->sas_prim = HW_EVENT_BROADCAST_CHANGE;
+               spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+               sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+               break;
+       case HW_EVENT_PHY_ERROR:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_PHY_ERROR\n"));
+               sas_phy_disconnected(&phy->sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR);
+               break;
+       case HW_EVENT_BROADCAST_EXP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_BROADCAST_EXP\n"));
+               spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+               sas_phy->sas_prim = HW_EVENT_BROADCAST_EXP;
+               spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+               sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+               break;
+       case HW_EVENT_LINK_ERR_INVALID_DWORD:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_LINK_ERR_INVALID_DWORD\n"));
+               pm80xx_hw_event_ack_req(pm8001_ha, 0,
+                       HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0);
+               sas_phy_disconnected(sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+               break;
+       case HW_EVENT_LINK_ERR_DISPARITY_ERROR:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_LINK_ERR_DISPARITY_ERROR\n"));
+               pm80xx_hw_event_ack_req(pm8001_ha, 0,
+                       HW_EVENT_LINK_ERR_DISPARITY_ERROR,
+                       port_id, phy_id, 0, 0);
+               sas_phy_disconnected(sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+               break;
+       case HW_EVENT_LINK_ERR_CODE_VIOLATION:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_LINK_ERR_CODE_VIOLATION\n"));
+               pm80xx_hw_event_ack_req(pm8001_ha, 0,
+                       HW_EVENT_LINK_ERR_CODE_VIOLATION,
+                       port_id, phy_id, 0, 0);
+               sas_phy_disconnected(sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+               break;
+       case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                               "HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH\n"));
+               pm80xx_hw_event_ack_req(pm8001_ha, 0,
+                       HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH,
+                       port_id, phy_id, 0, 0);
+               sas_phy_disconnected(sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+               break;
+       case HW_EVENT_MALFUNCTION:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_MALFUNCTION\n"));
+               break;
+       case HW_EVENT_BROADCAST_SES:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_BROADCAST_SES\n"));
+               spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
+               sas_phy->sas_prim = HW_EVENT_BROADCAST_SES;
+               spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
+               sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+               break;
+       case HW_EVENT_INBOUND_CRC_ERROR:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_INBOUND_CRC_ERROR\n"));
+               pm80xx_hw_event_ack_req(pm8001_ha, 0,
+                       HW_EVENT_INBOUND_CRC_ERROR,
+                       port_id, phy_id, 0, 0);
+               break;
+       case HW_EVENT_HARD_RESET_RECEIVED:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_HARD_RESET_RECEIVED\n"));
+               sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET);
+               break;
+       case HW_EVENT_ID_FRAME_TIMEOUT:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_ID_FRAME_TIMEOUT\n"));
+               sas_phy_disconnected(sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+               break;
+       case HW_EVENT_LINK_ERR_PHY_RESET_FAILED:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_LINK_ERR_PHY_RESET_FAILED\n"));
+               pm80xx_hw_event_ack_req(pm8001_ha, 0,
+                       HW_EVENT_LINK_ERR_PHY_RESET_FAILED,
+                       port_id, phy_id, 0, 0);
+               sas_phy_disconnected(sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+               break;
+       case HW_EVENT_PORT_RESET_TIMER_TMO:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_PORT_RESET_TIMER_TMO\n"));
+               sas_phy_disconnected(sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+               break;
+       case HW_EVENT_PORT_RECOVERY_TIMER_TMO:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_PORT_RECOVERY_TIMER_TMO\n"));
+               pm80xx_hw_event_ack_req(pm8001_ha, 0,
+                       HW_EVENT_PORT_RECOVERY_TIMER_TMO,
+                       port_id, phy_id, 0, 0);
+               sas_phy_disconnected(sas_phy);
+               phy->phy_attached = 0;
+               sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+               break;
+       case HW_EVENT_PORT_RECOVER:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_PORT_RECOVER\n"));
+               break;
+       case HW_EVENT_PORT_RESET_COMPLETE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("HW_EVENT_PORT_RESET_COMPLETE\n"));
+               break;
+       case EVENT_BROADCAST_ASYNCH_EVENT:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("EVENT_BROADCAST_ASYNCH_EVENT\n"));
+               break;
+       default:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("Unknown event type 0x%x\n", eventType));
+               break;
+       }
+       return 0;
+}
+
+/**
+ * mpi_phy_stop_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_phy_stop_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       struct phy_stop_resp *pPayload =
+               (struct phy_stop_resp *)(piomb + 4);
+       u32 status =
+               le32_to_cpu(pPayload->status);
+       u32 phyid =
+               le32_to_cpu(pPayload->phyid);
+       struct pm8001_phy *phy = &pm8001_ha->phy[phyid];
+       PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("phy:0x%x status:0x%x\n",
+                                       phyid, status));
+       if (status == 0)
+               phy->phy_state = 0;
+       return 0;
+}
+
+/**
+ * mpi_set_controller_config_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_set_controller_config_resp(struct pm8001_hba_info *pm8001_ha,
+                       void *piomb)
+{
+       struct set_ctrl_cfg_resp *pPayload =
+                       (struct set_ctrl_cfg_resp *)(piomb + 4);
+       u32 status = le32_to_cpu(pPayload->status);
+       u32 err_qlfr_pgcd = le32_to_cpu(pPayload->err_qlfr_pgcd);
+
+       PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x\n",
+                       status, err_qlfr_pgcd));
+
+       return 0;
+}
+
+/**
+ * mpi_get_controller_config_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_get_controller_config_resp(struct pm8001_hba_info *pm8001_ha,
+                       void *piomb)
+{
+       PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" pm80xx_addition_functionality\n"));
+
+       return 0;
+}
+
+/**
+ * mpi_get_phy_profile_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_get_phy_profile_resp(struct pm8001_hba_info *pm8001_ha,
+                       void *piomb)
+{
+       PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" pm80xx_addition_functionality\n"));
+
+       return 0;
+}
+
+/**
+ * mpi_flash_op_ext_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_flash_op_ext_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" pm80xx_addition_functionality\n"));
+
+       return 0;
+}
+
+/**
+ * mpi_set_phy_profile_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_set_phy_profile_resp(struct pm8001_hba_info *pm8001_ha,
+                       void *piomb)
+{
+       PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" pm80xx_addition_functionality\n"));
+
+       return 0;
+}
+
+/**
+ * mpi_kek_management_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_kek_management_resp(struct pm8001_hba_info *pm8001_ha,
+                       void *piomb)
+{
+       struct kek_mgmt_resp *pPayload = (struct kek_mgmt_resp *)(piomb + 4);
+
+       u32 status = le32_to_cpu(pPayload->status);
+       u32 kidx_new_curr_ksop = le32_to_cpu(pPayload->kidx_new_curr_ksop);
+       u32 err_qlfr = le32_to_cpu(pPayload->err_qlfr);
+
+       PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+               "KEK MGMT RESP. Status 0x%x idx_ksop 0x%x err_qlfr 0x%x\n",
+               status, kidx_new_curr_ksop, err_qlfr));
+
+       return 0;
+}
+
+/**
+ * mpi_dek_management_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int mpi_dek_management_resp(struct pm8001_hba_info *pm8001_ha,
+                       void *piomb)
+{
+       PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" pm80xx_addition_functionality\n"));
+
+       return 0;
+}
+
+/**
+ * ssp_coalesced_comp_resp - SPCv specific
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static int ssp_coalesced_comp_resp(struct pm8001_hba_info *pm8001_ha,
+                       void *piomb)
+{
+       PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk(" pm80xx_addition_functionality\n"));
+
+       return 0;
+}
+
+/**
+ * process_one_iomb - process one outbound Queue memory block
+ * @pm8001_ha: our hba card information
+ * @piomb: IO message buffer
+ */
+static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
+{
+       __le32 pHeader = *(__le32 *)piomb;
+       u32 opc = (u32)((le32_to_cpu(pHeader)) & 0xFFF);
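+       /* The low 12 bits of the IOMB header dword carry the outbound
+        * opcode, hence the 0xFFF mask. */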
+
+       switch (opc) {
+       case OPC_OUB_ECHO:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_ECHO\n"));
+               break;
+       case OPC_OUB_HW_EVENT:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_HW_EVENT\n"));
+               mpi_hw_event(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_THERM_HW_EVENT:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_THERMAL_EVENT\n"));
+               mpi_thermal_hw_event(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SSP_COMP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SSP_COMP\n"));
+               mpi_ssp_completion(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SMP_COMP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SMP_COMP\n"));
+               mpi_smp_completion(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_LOCAL_PHY_CNTRL:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_LOCAL_PHY_CNTRL\n"));
+               pm8001_mpi_local_phy_ctl(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_DEV_REGIST:
+               PM8001_MSG_DBG(pm8001_ha,
+               pm8001_printk("OPC_OUB_DEV_REGIST\n"));
+               pm8001_mpi_reg_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_DEREG_DEV:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("unresgister the deviece\n"));
+               pm8001_mpi_dereg_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_GET_DEV_HANDLE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_GET_DEV_HANDLE\n"));
+               break;
+       case OPC_OUB_SATA_COMP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SATA_COMP\n"));
+               mpi_sata_completion(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SATA_EVENT:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SATA_EVENT\n"));
+               mpi_sata_event(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SSP_EVENT:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SSP_EVENT\n"));
+               mpi_ssp_event(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_DEV_HANDLE_ARRIV:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_DEV_HANDLE_ARRIV\n"));
+               /*This is for target*/
+               break;
+       case OPC_OUB_SSP_RECV_EVENT:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SSP_RECV_EVENT\n"));
+               /*This is for target*/
+               break;
+       case OPC_OUB_FW_FLASH_UPDATE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_FW_FLASH_UPDATE\n"));
+               pm8001_mpi_fw_flash_update_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_GPIO_RESPONSE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_GPIO_RESPONSE\n"));
+               break;
+       case OPC_OUB_GPIO_EVENT:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_GPIO_EVENT\n"));
+               break;
+       case OPC_OUB_GENERAL_EVENT:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_GENERAL_EVENT\n"));
+               pm8001_mpi_general_event(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SSP_ABORT_RSP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SSP_ABORT_RSP\n"));
+               pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SATA_ABORT_RSP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SATA_ABORT_RSP\n"));
+               pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SAS_DIAG_MODE_START_END:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SAS_DIAG_MODE_START_END\n"));
+               break;
+       case OPC_OUB_SAS_DIAG_EXECUTE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SAS_DIAG_EXECUTE\n"));
+               break;
+       case OPC_OUB_GET_TIME_STAMP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_GET_TIME_STAMP\n"));
+               break;
+       case OPC_OUB_SAS_HW_EVENT_ACK:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SAS_HW_EVENT_ACK\n"));
+               break;
+       case OPC_OUB_PORT_CONTROL:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_PORT_CONTROL\n"));
+               break;
+       case OPC_OUB_SMP_ABORT_RSP:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SMP_ABORT_RSP\n"));
+               pm8001_mpi_task_abort_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_GET_NVMD_DATA:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_GET_NVMD_DATA\n"));
+               pm8001_mpi_get_nvmd_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SET_NVMD_DATA:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SET_NVMD_DATA\n"));
+               pm8001_mpi_set_nvmd_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_DEVICE_HANDLE_REMOVAL:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_DEVICE_HANDLE_REMOVAL\n"));
+               break;
+       case OPC_OUB_SET_DEVICE_STATE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SET_DEVICE_STATE\n"));
+               pm8001_mpi_set_dev_state_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_GET_DEVICE_STATE:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_GET_DEVICE_STATE\n"));
+               break;
+       case OPC_OUB_SET_DEV_INFO:
+               PM8001_MSG_DBG(pm8001_ha,
+                       pm8001_printk("OPC_OUB_SET_DEV_INFO\n"));
+               break;
+       /* SPCv specific commands */
+       case OPC_OUB_PHY_START_RESP:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_PHY_START_RESP opcode:%x\n", opc));
+               mpi_phy_start_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_PHY_STOP_RESP:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_PHY_STOP_RESP opcode:%x\n", opc));
+               mpi_phy_stop_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SET_CONTROLLER_CONFIG:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_SET_CONTROLLER_CONFIG opcode:%x\n", opc));
+               mpi_set_controller_config_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_GET_CONTROLLER_CONFIG:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_GET_CONTROLLER_CONFIG opcode:%x\n", opc));
+               mpi_get_controller_config_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_GET_PHY_PROFILE:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_GET_PHY_PROFILE opcode:%x\n", opc));
+               mpi_get_phy_profile_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_FLASH_OP_EXT:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_FLASH_OP_EXT opcode:%x\n", opc));
+               mpi_flash_op_ext_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SET_PHY_PROFILE:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_SET_PHY_PROFILE opcode:%x\n", opc));
+               mpi_set_phy_profile_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_KEK_MANAGEMENT_RESP:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_KEK_MANAGEMENT_RESP opcode:%x\n", opc));
+               mpi_kek_management_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_DEK_MANAGEMENT_RESP:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_DEK_MANAGEMENT_RESP opcode:%x\n", opc));
+               mpi_dek_management_resp(pm8001_ha, piomb);
+               break;
+       case OPC_OUB_SSP_COALESCED_COMP_RESP:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "OPC_OUB_SSP_COALESCED_COMP_RESP opcode:%x\n", opc));
+               ssp_coalesced_comp_resp(pm8001_ha, piomb);
+               break;
+       default:
+               PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
+                       "Unknown outbound Queue IOMB OPC = 0x%x\n", opc));
+               break;
+       }
+}
+
+static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
+{
+       struct outbound_queue_table *circularQ;
+       void *pMsg1 = NULL;
+       u8 uninitialized_var(bc);
+       u32 ret = MPI_IO_STATUS_FAIL;
+       unsigned long flags;
+
+       spin_lock_irqsave(&pm8001_ha->lock, flags);
+       circularQ = &pm8001_ha->outbnd_q_tbl[vec];
+       do {
+               ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
+               if (MPI_IO_STATUS_SUCCESS == ret) {
+                       /* process the outbound message */
+                       process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4));
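+                       /* pMsg1 evidently points just past the 4-byte IOMB
+                        * header, so back up by 4 so that process_one_iomb()
+                        * sees the message starting at its header dword. */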
+                       /* free the message from the outbound circular buffer */
+                       pm8001_mpi_msg_free_set(pm8001_ha, pMsg1,
+                                                       circularQ, bc);
+               }
+               if (MPI_IO_STATUS_BUSY == ret) {
+                       /* Update the producer index from SPC */
+                       circularQ->producer_index =
+                               cpu_to_le32(pm8001_read_32(circularQ->pi_virt));
+                       if (le32_to_cpu(circularQ->producer_index) ==
+                               circularQ->consumer_idx)
+                               /* OQ is empty */
+                               break;
+               }
+       } while (1);
+       spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+       return ret;
+}
+
+/* PCI_DMA_... to our direction translation. */
+static const u8 data_dir_flags[] = {
+       [PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT,/* UNSPECIFIED */
+       [PCI_DMA_TODEVICE]      = DATA_DIR_OUT,/* OUTBOUND */
+       [PCI_DMA_FROMDEVICE]    = DATA_DIR_IN,/* INBOUND */
+       [PCI_DMA_NONE]          = DATA_DIR_NONE,/* NO TRANSFER */
+};
+
+static void build_smp_cmd(u32 deviceID, __le32 hTag,
+                       struct smp_req *psmp_cmd, int mode, int length)
+{
+       psmp_cmd->tag = hTag;
+       psmp_cmd->device_id = cpu_to_le32(deviceID);
+       if (mode == SMP_DIRECT) {
+               length = length - 4; /* subtract crc */
+               psmp_cmd->len_ip_ir = cpu_to_le32(length << 16);
+       } else {
+               psmp_cmd->len_ip_ir = cpu_to_le32(1|(1 << 1));
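+               /* Bits 0-1 of len_ip_ir appear to flag an indirect request
+                * and indirect response; in direct mode bits 31:16 instead
+                * carry the request length with the CRC stripped. */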
+       }
+}
+
+/**
+ * pm80xx_chip_smp_req - send an SMP task to FW
+ * @pm8001_ha: our hba card information.
+ * @ccb: the ccb information this request used.
+ */
+static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
+       struct pm8001_ccb_info *ccb)
+{
+       int elem, rc;
+       struct sas_task *task = ccb->task;
+       struct domain_device *dev = task->dev;
+       struct pm8001_device *pm8001_dev = dev->lldd_dev;
+       struct scatterlist *sg_req, *sg_resp;
+       u32 req_len, resp_len;
+       struct smp_req smp_cmd;
+       u32 opc;
+       struct inbound_queue_table *circularQ;
+       char *preq_dma_addr = NULL;
+       __le64 tmp_addr;
+       u32 i, length;
+
+       memset(&smp_cmd, 0, sizeof(smp_cmd));
+       /*
+        * DMA-map SMP request, response buffers
+        */
+       sg_req = &task->smp_task.smp_req;
+       elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, PCI_DMA_TODEVICE);
+       if (!elem)
+               return -ENOMEM;
+       req_len = sg_dma_len(sg_req);
+
+       sg_resp = &task->smp_task.smp_resp;
+       elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
+       if (!elem) {
+               rc = -ENOMEM;
+               goto err_out;
+       }
+       resp_len = sg_dma_len(sg_resp);
+       /* must be in dwords */
+       if ((req_len & 0x3) || (resp_len & 0x3)) {
+               rc = -EINVAL;
+               goto err_out_2;
+       }
+
+       opc = OPC_INB_SMP_REQUEST;
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+       smp_cmd.tag = cpu_to_le32(ccb->ccb_tag);
+
+       length = sg_req->length;
+       PM8001_IO_DBG(pm8001_ha,
+               pm8001_printk("SMP Frame Length %d\n", sg_req->length));
+       if (!(length - 8))
+               pm8001_ha->smp_exp_mode = SMP_DIRECT;
+       else
+               pm8001_ha->smp_exp_mode = SMP_INDIRECT;
+
+       /* DIRECT MODE support only in spcv/ve */
+       pm8001_ha->smp_exp_mode = SMP_DIRECT;
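+       /* Note: this unconditionally overrides the length-based selection
+        * above, so the request is always sent in direct mode here. */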
+
+       tmp_addr = cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
+       preq_dma_addr = (char *)phys_to_virt(tmp_addr);
+
+       /* INDIRECT MODE command settings. Use DMA */
+       if (pm8001_ha->smp_exp_mode == SMP_INDIRECT) {
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("SMP REQUEST INDIRECT MODE\n"));
+               /* for SPCv indirect mode. Place the top 4 bytes of
+                * SMP Request header here. */
+               for (i = 0; i < 4; i++)
+                       smp_cmd.smp_req16[i] = *(preq_dma_addr + i);
+               /* exclude top 4 bytes for SMP req header */
+               smp_cmd.long_smp_req.long_req_addr =
+                       cpu_to_le64((u64)sg_dma_address
+                               (&task->smp_task.smp_req) - 4);
+               /* exclude 4 bytes for SMP req header and CRC */
+               smp_cmd.long_smp_req.long_req_size =
+                       cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-8);
+               smp_cmd.long_smp_req.long_resp_addr =
+                               cpu_to_le64((u64)sg_dma_address
+                                       (&task->smp_task.smp_resp));
+               smp_cmd.long_smp_req.long_resp_size =
+                               cpu_to_le32((u32)sg_dma_len
+                                       (&task->smp_task.smp_resp)-4);
+       } else { /* DIRECT MODE */
+               smp_cmd.long_smp_req.long_req_addr =
+                       cpu_to_le64((u64)sg_dma_address
+                                       (&task->smp_task.smp_req));
+               smp_cmd.long_smp_req.long_req_size =
+                       cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4);
+               smp_cmd.long_smp_req.long_resp_addr =
+                       cpu_to_le64((u64)sg_dma_address
+                               (&task->smp_task.smp_resp));
+               smp_cmd.long_smp_req.long_resp_size =
+                       cpu_to_le32
+                       ((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
+       }
+       if (pm8001_ha->smp_exp_mode == SMP_DIRECT) {
+               PM8001_IO_DBG(pm8001_ha,
+                       pm8001_printk("SMP REQUEST DIRECT MODE\n"));
+               for (i = 0; i < length; i++)
+                       if (i < 16) {
+                               smp_cmd.smp_req16[i] = *(preq_dma_addr+i);
+                               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                                       "Byte[%d]:%x (DMA data:%x)\n",
+                                       i, smp_cmd.smp_req16[i],
+                                       *(preq_dma_addr)));
+                       } else {
+                               smp_cmd.smp_req[i] = *(preq_dma_addr+i);
+                               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                                       "Byte[%d]:%x (DMA data:%x)\n",
+                                       i, smp_cmd.smp_req[i],
+                                       *(preq_dma_addr)));
+                       }
+       }
+
+       build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag,
+                               &smp_cmd, pm8001_ha->smp_exp_mode, length);
+       pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd, 0);
+       return 0;
+
+err_out_2:
+       dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1,
+                       PCI_DMA_FROMDEVICE);
+err_out:
+       dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1,
+                       PCI_DMA_TODEVICE);
+       return rc;
+}
+
+static int check_enc_sas_cmd(struct sas_task *task)
+{
+       if ((task->ssp_task.cdb[0] == READ_10)
+               || (task->ssp_task.cdb[0] == WRITE_10)
+               || (task->ssp_task.cdb[0] == WRITE_VERIFY))
+               return 1;
+       else
+               return 0;
+}
+
+static int check_enc_sat_cmd(struct sas_task *task)
+{
+       int ret = 0;
+       switch (task->ata_task.fis.command) {
+       case ATA_CMD_FPDMA_READ:
+       case ATA_CMD_READ_EXT:
+       case ATA_CMD_READ:
+       case ATA_CMD_FPDMA_WRITE:
+       case ATA_CMD_WRITE_EXT:
+       case ATA_CMD_WRITE:
+       case ATA_CMD_PIO_READ:
+       case ATA_CMD_PIO_READ_EXT:
+       case ATA_CMD_PIO_WRITE:
+       case ATA_CMD_PIO_WRITE_EXT:
+               ret = 1;
+               break;
+       default:
+               ret = 0;
+               break;
+       }
+       return ret;
+}
+
+/**
+ * pm80xx_chip_ssp_io_req - send an SSP task to FW
+ * @pm8001_ha: our hba card information.
+ * @ccb: the ccb information this request used.
+ */
+static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
+       struct pm8001_ccb_info *ccb)
+{
+       struct sas_task *task = ccb->task;
+       struct domain_device *dev = task->dev;
+       struct pm8001_device *pm8001_dev = dev->lldd_dev;
+       struct ssp_ini_io_start_req ssp_cmd;
+       u32 tag = ccb->ccb_tag;
+       int ret;
+       u64 phys_addr;
+       struct inbound_queue_table *circularQ;
+       static u32 inb;
+       static u32 outb;
+       u32 opc = OPC_INB_SSPINIIOSTART;
+       memset(&ssp_cmd, 0, sizeof(ssp_cmd));
+       memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
+       /* data address domain added for spcv; set to 0 by host,
+        * used internally by controller
+        * 0 for SAS 1.1 and SAS 2.0 compatible TLR
+        */
+       ssp_cmd.dad_dir_m_tlr =
+               cpu_to_le32(data_dir_flags[task->data_dir] << 8 | 0x0);
+       ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len);
+       ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id);
+       ssp_cmd.tag = cpu_to_le32(tag);
+       if (task->ssp_task.enable_first_burst)
+               ssp_cmd.ssp_iu.efb_prio_attr |= 0x80;
+       ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3);
+       ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
+       memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cdb, 16);
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+       /* Check if encryption is set */
+       if (pm8001_ha->chip->encrypt &&
+               !(pm8001_ha->encrypt_info.status) && check_enc_sas_cmd(task)) {
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "Encryption enabled.Sending Encrypt SAS command 0x%x\n",
+                       task->ssp_task.cdb[0]));
+               opc = OPC_INB_SSP_INI_DIF_ENC_IO;
+               /* enable encryption. 0 for SAS 1.1 and SAS 2.0 compatible TLR*/
+               ssp_cmd.dad_dir_m_tlr = cpu_to_le32
+                       ((data_dir_flags[task->data_dir] << 8) | 0x20 | 0x0);
+
+               /* fill in PRD (scatter/gather) table, if any */
+               if (task->num_scatter > 1) {
+                       pm8001_chip_make_sg(task->scatter,
+                                               ccb->n_elem, ccb->buf_prd);
+                       phys_addr = ccb->ccb_dma_handle +
+                               offsetof(struct pm8001_ccb_info, buf_prd[0]);
+                       ssp_cmd.enc_addr_low =
+                               cpu_to_le32(lower_32_bits(phys_addr));
+                       ssp_cmd.enc_addr_high =
+                               cpu_to_le32(upper_32_bits(phys_addr));
+                       ssp_cmd.enc_esgl = cpu_to_le32(1<<31);
+               } else if (task->num_scatter == 1) {
+                       u64 dma_addr = sg_dma_address(task->scatter);
+                       ssp_cmd.enc_addr_low =
+                               cpu_to_le32(lower_32_bits(dma_addr));
+                       ssp_cmd.enc_addr_high =
+                               cpu_to_le32(upper_32_bits(dma_addr));
+                       ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
+                       ssp_cmd.enc_esgl = 0;
+               } else if (task->num_scatter == 0) {
+                       ssp_cmd.enc_addr_low = 0;
+                       ssp_cmd.enc_addr_high = 0;
+                       ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
+                       ssp_cmd.enc_esgl = 0;
+               }
+               /* XTS mode. All other fields are 0 */
+               ssp_cmd.key_cmode = 0x6 << 4;
+               /* set tweak values. Should be the start lba */
+               ssp_cmd.twk_val0 = cpu_to_le32((task->ssp_task.cdb[2] << 24) |
+                                               (task->ssp_task.cdb[3] << 16) |
+                                               (task->ssp_task.cdb[4] << 8) |
+                                               (task->ssp_task.cdb[5]));
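+               /* For READ_10/WRITE_10, CDB bytes 2-5 hold the big-endian
+                * 32-bit LBA; e.g. LBA 0x1000 gives cdb[2..5] = 00 00 10 00
+                * and hence a tweak value of 0x00001000. */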
+       } else {
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "Sending Normal SAS command 0x%x inb q %x\n",
+                       task->ssp_task.cdb[0], inb));
+               /* fill in PRD (scatter/gather) table, if any */
+               if (task->num_scatter > 1) {
+                       pm8001_chip_make_sg(task->scatter, ccb->n_elem,
+                                       ccb->buf_prd);
+                       phys_addr = ccb->ccb_dma_handle +
+                               offsetof(struct pm8001_ccb_info, buf_prd[0]);
+                       ssp_cmd.addr_low =
+                               cpu_to_le32(lower_32_bits(phys_addr));
+                       ssp_cmd.addr_high =
+                               cpu_to_le32(upper_32_bits(phys_addr));
+                       ssp_cmd.esgl = cpu_to_le32(1<<31);
+               } else if (task->num_scatter == 1) {
+                       u64 dma_addr = sg_dma_address(task->scatter);
+                       ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(dma_addr));
+                       ssp_cmd.addr_high =
+                               cpu_to_le32(upper_32_bits(dma_addr));
+                       ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
+                       ssp_cmd.esgl = 0;
+               } else if (task->num_scatter == 0) {
+                       ssp_cmd.addr_low = 0;
+                       ssp_cmd.addr_high = 0;
+                       ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
+                       ssp_cmd.esgl = 0;
+               }
+       }
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, outb++);
+
+       /* rotate the outb queue */
+       outb = outb%PM8001_MAX_SPCV_OUTB_NUM;
+
+       return ret;
+}
+
+static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
+       struct pm8001_ccb_info *ccb)
+{
+       struct sas_task *task = ccb->task;
+       struct domain_device *dev = task->dev;
+       struct pm8001_device *pm8001_ha_dev = dev->lldd_dev;
+       u32 tag = ccb->ccb_tag;
+       int ret;
+       static u32 inb;
+       static u32 outb;
+       struct sata_start_req sata_cmd;
+       u32 hdr_tag, ncg_tag = 0;
+       u64 phys_addr;
+       u32 ATAP = 0x0;
+       u32 dir;
+       struct inbound_queue_table *circularQ;
+       unsigned long flags;
+       u32 opc = OPC_INB_SATA_HOST_OPSTART;
+       memset(&sata_cmd, 0, sizeof(sata_cmd));
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+       if (task->data_dir == PCI_DMA_NONE) {
+               ATAP = 0x04; /* no data*/
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data\n"));
+       } else if (likely(!task->ata_task.device_control_reg_update)) {
+               if (task->ata_task.dma_xfer) {
+                       ATAP = 0x06; /* DMA */
+                       PM8001_IO_DBG(pm8001_ha, pm8001_printk("DMA\n"));
+               } else {
+                       ATAP = 0x05; /* PIO*/
+                       PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO\n"));
+               }
+               if (task->ata_task.use_ncq &&
+                       dev->sata_dev.command_set != ATAPI_COMMAND_SET) {
+                       ATAP = 0x07; /* FPDMA */
+                       PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n"));
+               }
+       }
+       if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) {
+               task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
+               ncg_tag = hdr_tag;
+       }
+       dir = data_dir_flags[task->data_dir] << 8;
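+       /* Judging by the shifts used below, ncqtag_atap_dir_m_dad packs the
+        * NCQ tag in bits 23:16, the ATAP protocol in bits 15:10, the data
+        * direction in bits 9:8 and the DAD field in bits 1:0. */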
+       sata_cmd.tag = cpu_to_le32(tag);
+       sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
+       sata_cmd.data_len = cpu_to_le32(task->total_xfer_len);
+
+       sata_cmd.sata_fis = task->ata_task.fis;
+       if (likely(!task->ata_task.device_control_reg_update))
+               sata_cmd.sata_fis.flags |= 0x80;/* C=1: update ATA cmd reg */
+       sata_cmd.sata_fis.flags &= 0xF0;/* PM_PORT field shall be 0 */
+
+       /* Check if encryption is set */
+       if (pm8001_ha->chip->encrypt &&
+               !(pm8001_ha->encrypt_info.status) && check_enc_sat_cmd(task)) {
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "Encryption enabled.Sending Encrypt SATA cmd 0x%x\n",
+                       sata_cmd.sata_fis.command));
+               opc = OPC_INB_SATA_DIF_ENC_IO;
+
+               /* set encryption bit */
+               sata_cmd.ncqtag_atap_dir_m_dad =
+                       cpu_to_le32(((ncg_tag & 0xff)<<16)|
+                               ((ATAP & 0x3f) << 10) | 0x20 | dir);
+                                                       /* dad (bit 0-1) is 0 */
+               /* fill in PRD (scatter/gather) table, if any */
+               if (task->num_scatter > 1) {
+                       pm8001_chip_make_sg(task->scatter,
+                                               ccb->n_elem, ccb->buf_prd);
+                       phys_addr = ccb->ccb_dma_handle +
+                               offsetof(struct pm8001_ccb_info, buf_prd[0]);
+                       sata_cmd.enc_addr_low = lower_32_bits(phys_addr);
+                       sata_cmd.enc_addr_high = upper_32_bits(phys_addr);
+                       sata_cmd.enc_esgl = cpu_to_le32(1 << 31);
+               } else if (task->num_scatter == 1) {
+                       u64 dma_addr = sg_dma_address(task->scatter);
+                       sata_cmd.enc_addr_low = lower_32_bits(dma_addr);
+                       sata_cmd.enc_addr_high = upper_32_bits(dma_addr);
+                       sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
+                       sata_cmd.enc_esgl = 0;
+               } else if (task->num_scatter == 0) {
+                       sata_cmd.enc_addr_low = 0;
+                       sata_cmd.enc_addr_high = 0;
+                       sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len);
+                       sata_cmd.enc_esgl = 0;
+               }
+               /* XTS mode. All other fields are 0 */
+               sata_cmd.key_index_mode = 0x6 << 4;
+               /* set tweak values. Should be the start lba */
+               sata_cmd.twk_val0 =
+                       cpu_to_le32((sata_cmd.sata_fis.lbal_exp << 24) |
+                                       (sata_cmd.sata_fis.lbah << 16) |
+                                       (sata_cmd.sata_fis.lbam << 8) |
+                                       (sata_cmd.sata_fis.lbal));
+               sata_cmd.twk_val1 =
+                       cpu_to_le32((sata_cmd.sata_fis.lbah_exp << 8) |
+                                        (sata_cmd.sata_fis.lbam_exp));
+       } else {
+               PM8001_IO_DBG(pm8001_ha, pm8001_printk(
+                       "Sending Normal SATA command 0x%x inb %x\n",
+                       sata_cmd.sata_fis.command, inb));
+               /* dad (bit 0-1) is 0 */
+               sata_cmd.ncqtag_atap_dir_m_dad =
+                       cpu_to_le32(((ncg_tag & 0xff)<<16) |
+                                       ((ATAP & 0x3f) << 10) | dir);
+
+               /* fill in PRD (scatter/gather) table, if any */
+               if (task->num_scatter > 1) {
+                       pm8001_chip_make_sg(task->scatter,
+                                       ccb->n_elem, ccb->buf_prd);
+                       phys_addr = ccb->ccb_dma_handle +
+                               offsetof(struct pm8001_ccb_info, buf_prd[0]);
+                       sata_cmd.addr_low = lower_32_bits(phys_addr);
+                       sata_cmd.addr_high = upper_32_bits(phys_addr);
+                       sata_cmd.esgl = cpu_to_le32(1 << 31);
+               } else if (task->num_scatter == 1) {
+                       u64 dma_addr = sg_dma_address(task->scatter);
+                       sata_cmd.addr_low = lower_32_bits(dma_addr);
+                       sata_cmd.addr_high = upper_32_bits(dma_addr);
+                       sata_cmd.len = cpu_to_le32(task->total_xfer_len);
+                       sata_cmd.esgl = 0;
+               } else if (task->num_scatter == 0) {
+                       sata_cmd.addr_low = 0;
+                       sata_cmd.addr_high = 0;
+                       sata_cmd.len = cpu_to_le32(task->total_xfer_len);
+                       sata_cmd.esgl = 0;
+               }
+               /* scsi cdb */
+               sata_cmd.atapi_scsi_cdb[0] =
+                       cpu_to_le32(((task->ata_task.atapi_packet[0]) |
+                       (task->ata_task.atapi_packet[1] << 8) |
+                       (task->ata_task.atapi_packet[2] << 16) |
+                       (task->ata_task.atapi_packet[3] << 24)));
+               sata_cmd.atapi_scsi_cdb[1] =
+                       cpu_to_le32(((task->ata_task.atapi_packet[4]) |
+                       (task->ata_task.atapi_packet[5] << 8) |
+                       (task->ata_task.atapi_packet[6] << 16) |
+                       (task->ata_task.atapi_packet[7] << 24)));
+               sata_cmd.atapi_scsi_cdb[2] =
+                       cpu_to_le32(((task->ata_task.atapi_packet[8]) |
+                       (task->ata_task.atapi_packet[9] << 8) |
+                       (task->ata_task.atapi_packet[10] << 16) |
+                       (task->ata_task.atapi_packet[11] << 24)));
+               sata_cmd.atapi_scsi_cdb[3] =
+                       cpu_to_le32(((task->ata_task.atapi_packet[12]) |
+                       (task->ata_task.atapi_packet[13] << 8) |
+                       (task->ata_task.atapi_packet[14] << 16) |
+                       (task->ata_task.atapi_packet[15] << 24)));
+       }
+
+       /* Check for read log for failed drive and return */
+       if (sata_cmd.sata_fis.command == 0x2f) {
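+               /* 0x2f is ATA_CMD_READ_LOG_EXT, typically issued during NCQ
+                * error recovery; for the devices flagged below, complete the
+                * task locally instead of sending it to the firmware. */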
+               if (pm8001_ha_dev && ((pm8001_ha_dev->id & NCQ_READ_LOG_FLAG) ||
+                       (pm8001_ha_dev->id & NCQ_ABORT_ALL_FLAG) ||
+                       (pm8001_ha_dev->id & NCQ_2ND_RLE_FLAG))) {
+                       struct task_status_struct *ts;
+
+                       pm8001_ha_dev->id &= 0xDFFFFFFF;
+                       ts = &task->task_status;
+
+                       spin_lock_irqsave(&task->task_state_lock, flags);
+                       ts->resp = SAS_TASK_COMPLETE;
+                       ts->stat = SAM_STAT_GOOD;
+                       task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+                       task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+                       task->task_state_flags |= SAS_TASK_STATE_DONE;
+                       if (unlikely((task->task_state_flags &
+                                       SAS_TASK_STATE_ABORTED))) {
+                               spin_unlock_irqrestore(&task->task_state_lock,
+                                                       flags);
+                               PM8001_FAIL_DBG(pm8001_ha,
+                                       pm8001_printk("task 0x%p resp 0x%x "
+                                       " stat 0x%x but aborted by upper layer "
+                                       "\n", task, ts->resp, ts->stat));
+                               pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+                               return 0;
+                       } else if (task->uldd_task) {
+                               spin_unlock_irqrestore(&task->task_state_lock,
+                                                       flags);
+                               pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+                               mb();/* ditto */
+                               spin_unlock_irq(&pm8001_ha->lock);
+                               task->task_done(task);
+                               spin_lock_irq(&pm8001_ha->lock);
+                               return 0;
+                       } else if (!task->uldd_task) {
+                               spin_unlock_irqrestore(&task->task_state_lock,
+                                                       flags);
+                               pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
+                               mb();/*ditto*/
+                               spin_unlock_irq(&pm8001_ha->lock);
+                               task->task_done(task);
+                               spin_lock_irq(&pm8001_ha->lock);
+                               return 0;
+                       }
+               }
+       }
+
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
+                                               &sata_cmd, outb++);
+
+       /* rotate the outb queue */
+       outb = outb%PM8001_MAX_SPCV_OUTB_NUM;
+       return ret;
+}
+
+/**
+ * pm80xx_chip_phy_start_req - start phy via PHY_START COMMAND
+ * @pm8001_ha: our hba card information.
+ * @phy_id: the phy id which we want to start up.
+ */
+static int
+pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
+{
+       struct phy_start_req payload;
+       struct inbound_queue_table *circularQ;
+       int ret;
+       u32 tag = 0x01;
+       u32 opcode = OPC_INB_PHYSTART;
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+       memset(&payload, 0, sizeof(payload));
+       payload.tag = cpu_to_le32(tag);
+
+       PM8001_INIT_DBG(pm8001_ha,
+               pm8001_printk("PHY START REQ for phy_id %d\n", phy_id));
+       /*
+        ** [0:7]   PHY Identifier
+        ** [8:11]  link rate 1.5G, 3G, 6G
+        ** [12:13] link mode 01b SAS mode; 10b SATA mode; 11b Auto mode
+        ** [14]    0b disable spin up hold; 1b enable spin up hold
+        ** [15]    0b no change in current PHY analog setup; 1b enable using SPAST
+        */
+       payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE |
+                       LINKMODE_AUTO | LINKRATE_15 |
+                       LINKRATE_30 | LINKRATE_60 | phy_id);
+       /* SSC Disable and SAS Analog ST configuration */
+       /**
+       payload.ase_sh_lm_slr_phyid =
+               cpu_to_le32(SSC_DISABLE_30 | SAS_ASE | SPINHOLD_DISABLE |
+               LINKMODE_AUTO | LINKRATE_15 | LINKRATE_30 | LINKRATE_60 |
+               phy_id);
+       Have to add "SAS PHY Analog Setup SPASTI 1 Byte" Based on need
+       **/
+
+       payload.sas_identify.dev_type = SAS_END_DEVICE;
+       payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL;
+       memcpy(payload.sas_identify.sas_addr,
+               pm8001_ha->sas_addr, SAS_ADDR_SIZE);
+       payload.sas_identify.phy_id = phy_id;
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
+       return ret;
+}
+
+/**
+ * pm80xx_chip_phy_stop_req - stop phy via PHY_STOP COMMAND
+ * @pm8001_ha: our hba card information.
+ * @phy_id: the phy id which we want to stop.
+ */
+static int pm80xx_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
+       u8 phy_id)
+{
+       struct phy_stop_req payload;
+       struct inbound_queue_table *circularQ;
+       int ret;
+       u32 tag = 0x01;
+       u32 opcode = OPC_INB_PHYSTOP;
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+       memset(&payload, 0, sizeof(payload));
+       payload.tag = cpu_to_le32(tag);
+       payload.phy_id = cpu_to_le32(phy_id);
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0);
+       return ret;
+}
+
+/**
+ * pm80xx_chip_reg_dev_req - register a device with the controller firmware
+ * (see the comments on pm8001_mpi_reg_resp).
+ */
+static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
+       struct pm8001_device *pm8001_dev, u32 flag)
+{
+       struct reg_dev_req payload;
+       u32     opc;
+       u32 stp_sspsmp_sata = 0x4;
+       struct inbound_queue_table *circularQ;
+       u32 linkrate, phy_id;
+       int rc, tag = 0xdeadbeef;
+       struct pm8001_ccb_info *ccb;
+       u8 retryFlag = 0x1;
+       u16 firstBurstSize = 0;
+       u16 ITNT = 2000;
+       struct domain_device *dev = pm8001_dev->sas_device;
+       struct domain_device *parent_dev = dev->parent;
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+
+       memset(&payload, 0, sizeof(payload));
+       rc = pm8001_tag_alloc(pm8001_ha, &tag);
+       if (rc)
+               return rc;
+       ccb = &pm8001_ha->ccb_info[tag];
+       ccb->device = pm8001_dev;
+       ccb->ccb_tag = tag;
+       payload.tag = cpu_to_le32(tag);
+
+       if (flag == 1) {
+               stp_sspsmp_sata = 0x02; /*direct attached sata */
+       } else {
+               if (pm8001_dev->dev_type == SAS_SATA_DEV)
+                       stp_sspsmp_sata = 0x00; /* stp*/
+               else if (pm8001_dev->dev_type == SAS_END_DEVICE ||
+                       pm8001_dev->dev_type == SAS_EDGE_EXPANDER_DEVICE ||
+                       pm8001_dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)
+                       stp_sspsmp_sata = 0x01; /*ssp or smp*/
+       }
+       if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
+               phy_id = parent_dev->ex_dev.ex_phy->phy_id;
+       else
+               phy_id = pm8001_dev->attached_phy;
+
+       opc = OPC_INB_REG_DEV;
+
+       linkrate = (pm8001_dev->sas_device->linkrate < dev->port->linkrate) ?
+                       pm8001_dev->sas_device->linkrate : dev->port->linkrate;
+
+       payload.phyid_portid =
+               cpu_to_le32(((pm8001_dev->sas_device->port->id) & 0xFF) |
+               ((phy_id & 0xFF) << 8));
+
+       payload.dtype_dlr_mcn_ir_retry = cpu_to_le32((retryFlag & 0x01) |
+               ((linkrate & 0x0F) << 24) |
+               ((stp_sspsmp_sata & 0x03) << 28));
+       payload.firstburstsize_ITNexustimeout =
+               cpu_to_le32(ITNT | (firstBurstSize * 0x10000));
+
+       memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
+               SAS_ADDR_SIZE);
+
+       rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+
+       return rc;
+}
+
+/**
+ * pm80xx_chip_phy_ctl_req - support the local phy operation
+ * @pm8001_ha: our hba card information.
+ * @phyId: the phy id on which we want to operate
+ * @phy_op: the local phy operation to perform
+ */
+static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
+       u32 phyId, u32 phy_op)
+{
+       struct local_phy_ctl_req payload;
+       struct inbound_queue_table *circularQ;
+       int ret;
+       u32 opc = OPC_INB_LOCAL_PHY_CONTROL;
+       memset(&payload, 0, sizeof(payload));
+       circularQ = &pm8001_ha->inbnd_q_tbl[0];
+       payload.tag = cpu_to_le32(1);
+       payload.phyop_phyid =
+               cpu_to_le32(((phy_op & 0xFF) << 8) | (phyId & 0xFF));
+       ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+       return ret;
+}
+
+static u32 pm80xx_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha)
+{
+       u32 value;
+#ifdef PM8001_USE_MSIX
+       return 1;
+#endif
+       value = pm8001_cr32(pm8001_ha, 0, MSGU_ODR);
+       if (value)
+               return 1;
+       return 0;
+
+}
+
+/**
+ * pm80xx_chip_isr - PM80xx isr handler.
+ * @pm8001_ha: our hba card information.
+ * @vec: interrupt vector number.
+ */
+static irqreturn_t
+pm80xx_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec)
+{
+       pm80xx_chip_interrupt_disable(pm8001_ha, vec);
+       process_oq(pm8001_ha, vec);
+       pm80xx_chip_interrupt_enable(pm8001_ha, vec);
+       return IRQ_HANDLED;
+}
+
+const struct pm8001_dispatch pm8001_80xx_dispatch = {
+       .name                   = "pmc80xx",
+       .chip_init              = pm80xx_chip_init,
+       .chip_soft_rst          = pm80xx_chip_soft_rst,
+       .chip_rst               = pm80xx_hw_chip_rst,
+       .chip_iounmap           = pm8001_chip_iounmap,
+       .isr                    = pm80xx_chip_isr,
+       .is_our_interupt        = pm80xx_chip_is_our_interupt,
+       .isr_process_oq         = process_oq,
+       .interrupt_enable       = pm80xx_chip_interrupt_enable,
+       .interrupt_disable      = pm80xx_chip_interrupt_disable,
+       .make_prd               = pm8001_chip_make_sg,
+       .smp_req                = pm80xx_chip_smp_req,
+       .ssp_io_req             = pm80xx_chip_ssp_io_req,
+       .sata_req               = pm80xx_chip_sata_req,
+       .phy_start_req          = pm80xx_chip_phy_start_req,
+       .phy_stop_req           = pm80xx_chip_phy_stop_req,
+       .reg_dev_req            = pm80xx_chip_reg_dev_req,
+       .dereg_dev_req          = pm8001_chip_dereg_dev_req,
+       .phy_ctl_req            = pm80xx_chip_phy_ctl_req,
+       .task_abort             = pm8001_chip_abort_task,
+       .ssp_tm_req             = pm8001_chip_ssp_tm_req,
+       .get_nvmd_req           = pm8001_chip_get_nvmd_req,
+       .set_nvmd_req           = pm8001_chip_set_nvmd_req,
+       .fw_flash_update_req    = pm8001_chip_fw_flash_update_req,
+       .set_dev_state_req      = pm8001_chip_set_dev_state_req,
+};
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
new file mode 100644 (file)
index 0000000..2b760ba
--- /dev/null
@@ -0,0 +1,1523 @@
+/*
+ * PMC-Sierra SPCv/ve 8088/8089 SAS/SATA based host adapters driver
+ *
+ * Copyright (c) 2008-2009 USI Co., Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions, and the following disclaimer,
+ *     without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *     substantially similar to the "NO WARRANTY" disclaimer below
+ *     ("Disclaimer") and any redistribution must be conditioned upon
+ *     including a substantially similar Disclaimer requirement for further
+ *     binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *     of any contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ *
+ */
+
+#ifndef _PMC8001_REG_H_
+#define _PMC8001_REG_H_
+
+#include <linux/types.h>
+#include <scsi/libsas.h>
+
+/* for Request Opcode of IOMB */
+#define OPC_INB_ECHO                           1       /* 0x000 */
+#define OPC_INB_PHYSTART                       4       /* 0x004 */
+#define OPC_INB_PHYSTOP                                5       /* 0x005 */
+#define OPC_INB_SSPINIIOSTART                  6       /* 0x006 */
+#define OPC_INB_SSPINITMSTART                  7       /* 0x007 */
+/* 0x8 RESV IN SPCv */
+#define OPC_INB_RSVD                           8       /* 0x008 */
+#define OPC_INB_DEV_HANDLE_ACCEPT              9       /* 0x009 */
+#define OPC_INB_SSPTGTIOSTART                  10      /* 0x00A */
+#define OPC_INB_SSPTGTRSPSTART                 11      /* 0x00B */
+/* 0xC, 0xD, 0xE removed in SPCv */
+#define OPC_INB_SSP_ABORT                      15      /* 0x00F */
+#define OPC_INB_DEREG_DEV_HANDLE               16      /* 0x010 */
+#define OPC_INB_GET_DEV_HANDLE                 17      /* 0x011 */
+#define OPC_INB_SMP_REQUEST                    18      /* 0x012 */
+/* 0x13 SMP_RESPONSE is removed in SPCv */
+#define OPC_INB_SMP_ABORT                      20      /* 0x014 */
+/* 0x16 RESV IN SPCv */
+#define OPC_INB_RSVD1                          22      /* 0x016 */
+#define OPC_INB_SATA_HOST_OPSTART              23      /* 0x017 */
+#define OPC_INB_SATA_ABORT                     24      /* 0x018 */
+#define OPC_INB_LOCAL_PHY_CONTROL              25      /* 0x019 */
+/* 0x1A RESV IN SPCv */
+#define OPC_INB_RSVD2                          26      /* 0x01A */
+#define OPC_INB_FW_FLASH_UPDATE                        32      /* 0x020 */
+#define OPC_INB_GPIO                           34      /* 0x022 */
+#define OPC_INB_SAS_DIAG_MODE_START_END                35      /* 0x023 */
+#define OPC_INB_SAS_DIAG_EXECUTE               36      /* 0x024 */
+/* 0x25 RESV IN SPCv */
+#define OPC_INB_RSVD3                          37      /* 0x025 */
+#define OPC_INB_GET_TIME_STAMP                 38      /* 0x026 */
+#define OPC_INB_PORT_CONTROL                   39      /* 0x027 */
+#define OPC_INB_GET_NVMD_DATA                  40      /* 0x028 */
+#define OPC_INB_SET_NVMD_DATA                  41      /* 0x029 */
+#define OPC_INB_SET_DEVICE_STATE               42      /* 0x02A */
+#define OPC_INB_GET_DEVICE_STATE               43      /* 0x02B */
+#define OPC_INB_SET_DEV_INFO                   44      /* 0x02C */
+/* 0x2D RESV IN SPCv */
+#define OPC_INB_RSVD4                          45      /* 0x02D */
+#define OPC_INB_SGPIO_REGISTER                 46      /* 0x02E */
+#define OPC_INB_PCIE_DIAG_EXEC                 47      /* 0x02F */
+#define OPC_INB_SET_CONTROLLER_CONFIG          48      /* 0x030 */
+#define OPC_INB_GET_CONTROLLER_CONFIG          49      /* 0x031 */
+#define OPC_INB_REG_DEV                                50      /* 0x032 */
+#define OPC_INB_SAS_HW_EVENT_ACK               51      /* 0x033 */
+#define OPC_INB_GET_DEVICE_INFO                        52      /* 0x034 */
+#define OPC_INB_GET_PHY_PROFILE                        53      /* 0x035 */
+#define OPC_INB_FLASH_OP_EXT                   54      /* 0x036 */
+#define OPC_INB_SET_PHY_PROFILE                        55      /* 0x037 */
+#define OPC_INB_KEK_MANAGEMENT                 256     /* 0x100 */
+#define OPC_INB_DEK_MANAGEMENT                 257     /* 0x101 */
+#define OPC_INB_SSP_INI_DIF_ENC_IO             258     /* 0x102 */
+#define OPC_INB_SATA_DIF_ENC_IO                        259     /* 0x103 */
+
+/* for Response Opcode of IOMB */
+#define OPC_OUB_ECHO                                   1       /* 0x001 */
+#define OPC_OUB_RSVD                                   4       /* 0x004 */
+#define OPC_OUB_SSP_COMP                               5       /* 0x005 */
+#define OPC_OUB_SMP_COMP                               6       /* 0x006 */
+#define OPC_OUB_LOCAL_PHY_CNTRL                                7       /* 0x007 */
+#define OPC_OUB_RSVD1                                  10      /* 0x00A */
+#define OPC_OUB_DEREG_DEV                              11      /* 0x00B */
+#define OPC_OUB_GET_DEV_HANDLE                         12      /* 0x00C */
+#define OPC_OUB_SATA_COMP                              13      /* 0x00D */
+#define OPC_OUB_SATA_EVENT                             14      /* 0x00E */
+#define OPC_OUB_SSP_EVENT                              15      /* 0x00F */
+#define OPC_OUB_RSVD2                                  16      /* 0x010 */
+/* 0x11 - SMP_RECEIVED Notification removed in SPCv*/
+#define OPC_OUB_SSP_RECV_EVENT                         18      /* 0x012 */
+#define OPC_OUB_RSVD3                                  19      /* 0x013 */
+#define OPC_OUB_FW_FLASH_UPDATE                                20      /* 0x014 */
+#define OPC_OUB_GPIO_RESPONSE                          22      /* 0x016 */
+#define OPC_OUB_GPIO_EVENT                             23      /* 0x017 */
+#define OPC_OUB_GENERAL_EVENT                          24      /* 0x018 */
+#define OPC_OUB_SSP_ABORT_RSP                          26      /* 0x01A */
+#define OPC_OUB_SATA_ABORT_RSP                         27      /* 0x01B */
+#define OPC_OUB_SAS_DIAG_MODE_START_END                        28      /* 0x01C */
+#define OPC_OUB_SAS_DIAG_EXECUTE                       29      /* 0x01D */
+#define OPC_OUB_GET_TIME_STAMP                         30      /* 0x01E */
+#define OPC_OUB_RSVD4                                  31      /* 0x01F */
+#define OPC_OUB_PORT_CONTROL                           32      /* 0x020 */
+#define OPC_OUB_SKIP_ENTRY                             33      /* 0x021 */
+#define OPC_OUB_SMP_ABORT_RSP                          34      /* 0x022 */
+#define OPC_OUB_GET_NVMD_DATA                          35      /* 0x023 */
+#define OPC_OUB_SET_NVMD_DATA                          36      /* 0x024 */
+#define OPC_OUB_DEVICE_HANDLE_REMOVAL                  37      /* 0x025 */
+#define OPC_OUB_SET_DEVICE_STATE                       38      /* 0x026 */
+#define OPC_OUB_GET_DEVICE_STATE                       39      /* 0x027 */
+#define OPC_OUB_SET_DEV_INFO                           40      /* 0x028 */
+#define OPC_OUB_RSVD5                                  41      /* 0x029 */
+#define OPC_OUB_HW_EVENT                               1792    /* 0x700 */
+#define OPC_OUB_DEV_HANDLE_ARRIV                       1824    /* 0x720 */
+#define OPC_OUB_THERM_HW_EVENT                         1840    /* 0x730 */
+#define OPC_OUB_SGPIO_RESP                             2094    /* 0x82E */
+#define OPC_OUB_PCIE_DIAG_EXECUTE                      2095    /* 0x82F */
+#define OPC_OUB_DEV_REGIST                             2098    /* 0x832 */
+#define OPC_OUB_SAS_HW_EVENT_ACK                       2099    /* 0x833 */
+#define OPC_OUB_GET_DEVICE_INFO                                2100    /* 0x834 */
+/* spcv specific commands */
+#define OPC_OUB_PHY_START_RESP                         2052    /* 0x804 */
+#define OPC_OUB_PHY_STOP_RESP                          2053    /* 0x805 */
+#define OPC_OUB_SET_CONTROLLER_CONFIG                  2096    /* 0x830 */
+#define OPC_OUB_GET_CONTROLLER_CONFIG                  2097    /* 0x831 */
+#define OPC_OUB_GET_PHY_PROFILE                                2101    /* 0x835 */
+#define OPC_OUB_FLASH_OP_EXT                           2102    /* 0x836 */
+#define OPC_OUB_SET_PHY_PROFILE                                2103    /* 0x837 */
+#define OPC_OUB_KEK_MANAGEMENT_RESP                    2304    /* 0x900 */
+#define OPC_OUB_DEK_MANAGEMENT_RESP                    2305    /* 0x901 */
+#define OPC_OUB_SSP_COALESCED_COMP_RESP                        2306    /* 0x902 */
+
+/* for phy start*/
+#define SSC_DISABLE_15                 (0x01 << 16)
+#define SSC_DISABLE_30                 (0x02 << 16)
+#define SSC_DISABLE_60                 (0x04 << 16)
+#define SAS_ASE                                (0x01 << 15)
+#define SPINHOLD_DISABLE               (0x00 << 14)
+#define SPINHOLD_ENABLE                        (0x01 << 14)
+#define LINKMODE_SAS                   (0x01 << 12)
+#define LINKMODE_DSATA                 (0x02 << 12)
+#define LINKMODE_AUTO                  (0x03 << 12)
+#define LINKRATE_15                    (0x01 << 8)
+#define LINKRATE_30                    (0x02 << 8)
+#define LINKRATE_60                    (0x06 << 8)
+
+/* Thermal related */
+#define        THERMAL_ENABLE                  0x1
+#define        THERMAL_LOG_ENABLE              0x1
+#define THERMAL_OP_CODE                        0x6
+#define LTEMPHIL                        70
+#define RTEMPHIL                       100
+
+/* Encryption info */
+#define SCRATCH_PAD3_ENC_DISABLED      0x00000000
+#define SCRATCH_PAD3_ENC_DIS_ERR       0x00000001
+#define SCRATCH_PAD3_ENC_ENA_ERR       0x00000002
+#define SCRATCH_PAD3_ENC_READY         0x00000003
+#define SCRATCH_PAD3_ENC_MASK          SCRATCH_PAD3_ENC_READY
+
+#define SCRATCH_PAD3_XTS_ENABLED               (1 << 14)
+#define SCRATCH_PAD3_SMA_ENABLED               (1 << 4)
+#define SCRATCH_PAD3_SMB_ENABLED               (1 << 5)
+#define SCRATCH_PAD3_SMF_ENABLED               0
+#define SCRATCH_PAD3_SM_MASK                   0x000000F0
+#define SCRATCH_PAD3_ERR_CODE                  0x00FF0000
+
+#define SEC_MODE_SMF                           0x0
+#define SEC_MODE_SMA                           0x100
+#define SEC_MODE_SMB                           0x200
+#define CIPHER_MODE_ECB                                0x00000001
+#define CIPHER_MODE_XTS                                0x00000002
+#define KEK_MGMT_SUBOP_KEYCARDUPDATE           0x4
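A minimal sketch, not taken from the driver, of how the encryption state advertised in scratch pad 3 could be tested with these masks (helper names are assumptions):

	static inline bool spcv_enc_ready(u32 scratch_pad3)
	{
		return (scratch_pad3 & SCRATCH_PAD3_ENC_MASK) == SCRATCH_PAD3_ENC_READY;
	}

	static inline u32 spcv_enc_err_code(u32 scratch_pad3)
	{
		/* SCRATCH_PAD3_ERR_CODE masks bits [23:16] */
		return (scratch_pad3 & SCRATCH_PAD3_ERR_CODE) >> 16;
	}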
+
+/* SAS protocol timer configuration page */
+#define SAS_PROTOCOL_TIMER_CONFIG_PAGE  0x04
+#define STP_MCT_TMO                     32
+#define SSP_MCT_TMO                     32
+#define SAS_MAX_OPEN_TIME                              5
+#define SMP_MAX_CONN_TIMER              0xFF
+#define STP_FRM_TIMER                   0
+#define STP_IDLE_TIME                   5 /* 5 us; controller default */
+#define SAS_MFD                         0
+#define SAS_OPNRJT_RTRY_INTVL           2
+#define SAS_DOPNRJT_RTRY_TMO            128
+#define SAS_COPNRJT_RTRY_TMO            128
+
+/*
+  Making ORR bigger than IT NEXUS LOSS, which is 2000000 us = 2 seconds.
+  Assuming a bigger value of 3 seconds, 3000000/128 = 23437.5, where
+  128 is DOPNRJT_RTRY_TMO.
+*/
+#define SAS_DOPNRJT_RTRY_THR            23438
+#define SAS_COPNRJT_RTRY_THR            23438
+#define SAS_MAX_AIP                     0x200000
+#define IT_NEXUS_TIMEOUT       0x7D0
+#define PORT_RECOVERY_TIMEOUT  ((IT_NEXUS_TIMEOUT/100) + 30)
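For reference, IT_NEXUS_TIMEOUT is 0x7D0 (2000 decimal), so PORT_RECOVERY_TIMEOUT evaluates to 2000/100 + 30 = 50.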
+
+struct mpi_msg_hdr {
+       __le32  header; /* Bits [11:0] - Message operation code */
+       /* Bits [15:12] - Message Category */
+       /* Bits [21:16] - Outboundqueue ID for the
+       operation completion message */
+       /* Bits [23:22] - Reserved */
+       /* Bits [28:24] - Buffer Count, indicates how
+       many buffers are allocated for the message */
+       /* Bits [30:29] - Reserved */
+       /* Bits [31] - Message Valid bit */
+} __attribute__((packed, aligned(4)));
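As an illustrative sketch (not part of this patch), a driver could unpack the IOMB header dword along the bit boundaries documented above; the helper names here are hypothetical:

	static inline u32 mpi_hdr_opcode(__le32 header)
	{
		return le32_to_cpu(header) & 0xfff;		/* bits [11:0] */
	}

	static inline u32 mpi_hdr_obq_id(__le32 header)
	{
		return (le32_to_cpu(header) >> 16) & 0x3f;	/* bits [21:16] */
	}

	static inline u32 mpi_hdr_buf_count(__le32 header)
	{
		return (le32_to_cpu(header) >> 24) & 0x1f;	/* bits [28:24] */
	}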
+
+/*
+ * brief the data structure of PHY Start Command
+ * use to enable the phy (128 bytes)
+ */
+struct phy_start_req {
+       __le32  tag;
+       __le32  ase_sh_lm_slr_phyid;
+       struct sas_identify_frame sas_identify; /* 28 Bytes */
+       __le32 spasti;
+       u32     reserved[21];
+} __attribute__((packed, aligned(4)));
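One plausible way (a sketch, not the driver's exact code) to compose ase_sh_lm_slr_phyid from the phy-start defines above, assuming the phy id occupies the low bits as the field name suggests:

	static void fill_phy_start(struct phy_start_req *req, u32 tag, u8 phy_id)
	{
		req->tag = cpu_to_le32(tag);
		req->ase_sh_lm_slr_phyid = cpu_to_le32(LINKMODE_AUTO |
				LINKRATE_15 | LINKRATE_30 | LINKRATE_60 |
				SPINHOLD_DISABLE | phy_id);
	}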
+
+/*
+ * brief the data structure of PHY Start Command
+ * use to disable the phy (128 bytes)
+ */
+struct phy_stop_req {
+       __le32  tag;
+       __le32  phy_id;
+       u32     reserved[29];
+} __attribute__((packed, aligned(4)));
+
+/* set device bits fis - device to host */
+struct set_dev_bits_fis {
+       u8      fis_type;       /* 0xA1*/
+       u8      n_i_pmport;
+       /* b7 : n Bit. Notification bit. If set device needs attention. */
+       /* b6 : i Bit. Interrupt Bit */
+       /* b5-b4: reserved2 */
+       /* b3-b0: PM Port */
+       u8      status;
+       u8      error;
+       u32     _r_a;
+} __attribute__ ((packed));
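Illustration only: pulling the documented bits out of the n_i_pmport byte of a Set Device Bits FIS (helper names are hypothetical):

	static inline bool sdb_fis_notify(const struct set_dev_bits_fis *fis)
	{
		return fis->n_i_pmport & 0x80;	/* b7: notification */
	}

	static inline bool sdb_fis_intr(const struct set_dev_bits_fis *fis)
	{
		return fis->n_i_pmport & 0x40;	/* b6: interrupt */
	}

	static inline u8 sdb_fis_pmport(const struct set_dev_bits_fis *fis)
	{
		return fis->n_i_pmport & 0x0f;	/* b3-b0: PM port */
	}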
+/* PIO setup FIS - device to host */
+struct pio_setup_fis {
+       u8      fis_type;       /* 0x5f */
+       u8      i_d_pmPort;
+       /* b7 : reserved */
+       /* b6 : i bit. Interrupt bit */
+       /* b5 : d bit. data transfer direction. set to 1 for device to host
+       xfer */
+       /* b4 : reserved */
+       /* b3-b0: PM Port */
+       u8      status;
+       u8      error;
+       u8      lbal;
+       u8      lbam;
+       u8      lbah;
+       u8      device;
+       u8      lbal_exp;
+       u8      lbam_exp;
+       u8      lbah_exp;
+       u8      _r_a;
+       u8      sector_count;
+       u8      sector_count_exp;
+       u8      _r_b;
+       u8      e_status;
+       u8      _r_c[2];
+       u8      transfer_count;
+} __attribute__ ((packed));
+
+/*
+ * brief the data structure of SATA Completion Response
+ * use to describe the sata task response (64 bytes)
+ */
+struct sata_completion_resp {
+       __le32  tag;
+       __le32  status;
+       __le32  param;
+       u32     sata_resp[12];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of SAS HW Event Notification
+ * use to alert the host about the hardware event(64 bytes)
+ */
+/* updated outbound struct for spcv */
+
+struct hw_event_resp {
+       __le32  lr_status_evt_portid;
+       __le32  evt_param;
+       __le32  phyid_npip_portstate;
+       struct sas_identify_frame       sas_identify;
+       struct dev_to_host_fis  sata_fis;
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure for thermal event notification
+ */
+
+struct thermal_hw_event {
+       __le32  thermal_event;
+       __le32  rht_lht;
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of REGISTER DEVICE Command
+ * use to describe MPI REGISTER DEVICE Command (64 bytes)
+ */
+
+struct reg_dev_req {
+       __le32  tag;
+       __le32  phyid_portid;
+       __le32  dtype_dlr_mcn_ir_retry;
+       __le32  firstburstsize_ITNexustimeout;
+       u8      sas_addr[SAS_ADDR_SIZE];
+       __le32  upper_device_id;
+       u32     reserved[24];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of DEREGISTER DEVICE Command
+ * use to request spc to remove all internal resources associated
+ * with the device id (64 bytes)
+ */
+
+struct dereg_dev_req {
+       __le32  tag;
+       __le32  device_id;
+       u32     reserved[29];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of DEVICE_REGISTRATION Response
+ * use to notify the completion of the device registration (64 bytes)
+ */
+struct dev_reg_resp {
+       __le32  tag;
+       __le32  status;
+       __le32  device_id;
+       u32     reserved[12];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of Local PHY Control Command
+ * use to issue PHY CONTROL to local phy (64 bytes)
+ */
+struct local_phy_ctl_req {
+       __le32  tag;
+       __le32  phyop_phyid;
+       u32     reserved1[29];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of Local Phy Control Response
+ * use to describe MPI Local Phy Control Response (64 bytes)
+ */
+ struct local_phy_ctl_resp {
+       __le32  tag;
+       __le32  phyop_phyid;
+       __le32  status;
+       u32     reserved[12];
+} __attribute__((packed, aligned(4)));
+
+#define OP_BITS 0x0000FF00
+#define ID_BITS 0x000000FF
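A hypothetical decode of the phyop_phyid word in the local phy control response, using the two masks above (sketch only):

	static void local_phy_ctl_decode(const struct local_phy_ctl_resp *resp,
					 u32 *phy_op, u32 *phy_id)
	{
		u32 v = le32_to_cpu(resp->phyop_phyid);

		*phy_op = (v & OP_BITS) >> 8;
		*phy_id = v & ID_BITS;
	}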
+
+/*
+ * brief the data structure of PORT Control Command
+ * use to control port properties (64 bytes)
+ */
+
+struct port_ctl_req {
+       __le32  tag;
+       __le32  portop_portid;
+       __le32  param0;
+       __le32  param1;
+       u32     reserved1[27];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of HW Event Ack Command
+ * use to acknowledge receive HW event (64 bytes)
+ */
+struct hw_event_ack_req {
+       __le32  tag;
+       __le32  phyid_sea_portid;
+       __le32  param0;
+       __le32  param1;
+       u32     reserved1[27];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of PHY_START Response Command
+ * indicates the completion of PHY_START command (64 bytes)
+ */
+struct phy_start_resp {
+       __le32  tag;
+       __le32  status;
+       __le32  phyid;
+       u32     reserved[12];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of PHY_STOP Response Command
+ * indicates the completion of PHY_STOP command (64 bytes)
+ */
+struct phy_stop_resp {
+       __le32  tag;
+       __le32  status;
+       __le32  phyid;
+       u32     reserved[12];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of SSP Completion Response
+ * use to indicate a SSP Completion (n bytes)
+ */
+struct ssp_completion_resp {
+       __le32  tag;
+       __le32  status;
+       __le32  param;
+       __le32  ssptag_rescv_rescpad;
+       struct ssp_response_iu ssp_resp_iu;
+       __le32  residual_count;
+} __attribute__((packed, aligned(4)));
+
+#define SSP_RESCV_BIT  0x00010000
+
+/*
+ * brief the data structure of SATA EVENT response
+ * use to indicate a SATA Completion (64 bytes)
+ */
+struct sata_event_resp {
+       __le32 tag;
+       __le32 event;
+       __le32 port_id;
+       __le32 device_id;
+       u32 reserved;
+       __le32 event_param0;
+       __le32 event_param1;
+       __le32 sata_addr_h32;
+       __le32 sata_addr_l32;
+       __le32 e_udt1_udt0_crc;
+       __le32 e_udt5_udt4_udt3_udt2;
+       __le32 a_udt1_udt0_crc;
+       __le32 a_udt5_udt4_udt3_udt2;
+       __le32 hwdevid_diferr;
+       __le32 err_framelen_byteoffset;
+       __le32 err_dataframe;
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of SSP EVENT response
+ * use to indicate a SSP Completion (64 bytes)
+ */
+struct ssp_event_resp {
+       __le32 tag;
+       __le32 event;
+       __le32 port_id;
+       __le32 device_id;
+       __le32 ssp_tag;
+       __le32 event_param0;
+       __le32 event_param1;
+       __le32 sas_addr_h32;
+       __le32 sas_addr_l32;
+       __le32 e_udt1_udt0_crc;
+       __le32 e_udt5_udt4_udt3_udt2;
+       __le32 a_udt1_udt0_crc;
+       __le32 a_udt5_udt4_udt3_udt2;
+       __le32 hwdevid_diferr;
+       __le32 err_framelen_byteoffset;
+       __le32 err_dataframe;
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of General Event Notification Response
+ * use to describe MPI General Event Notification Response (64 bytes)
+ */
+struct general_event_resp {
+       __le32  status;
+       __le32  inb_IOMB_payload[14];
+} __attribute__((packed, aligned(4)));
+
+#define GENERAL_EVENT_PAYLOAD  14
+#define OPCODE_BITS    0x00000fff
+
+/*
+ * brief the data structure of SMP Request Command
+ * use to describe MPI SMP REQUEST Command (64 bytes)
+ */
+struct smp_req {
+       __le32  tag;
+       __le32  device_id;
+       __le32  len_ip_ir;
+       /* Bits [0] - Indirect response */
+       /* Bits [1] - Indirect Payload */
+       /* Bits [15:2] - Reserved */
+       /* Bits [23:16] - direct payload Len */
+       /* Bits [31:24] - Reserved */
+       u8      smp_req16[16];
+       union {
+               u8      smp_req[32];
+               struct {
+                       __le64 long_req_addr;/* sg dma address, LE */
+                       __le32 long_req_size;/* LE */
+                       u32     _r_a;
+                       __le64 long_resp_addr;/* sg dma address, LE */
+                       __le32 long_resp_size;/* LE */
+                       u32     _r_b;
+                       } long_smp_req;/* sequencer extension */
+       };
+       __le32  rsvd[16];
+} __attribute__((packed, aligned(4)));
+/*
+ * brief the data structure of SMP Completion Response
+ * use to describe MPI SMP Completion Response (64 bytes)
+ */
+struct smp_completion_resp {
+       __le32  tag;
+       __le32  status;
+       __le32  param;
+       u8      _r_a[252];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of SSP SMP SATA Abort Command
+ * use to describe MPI SSP SMP & SATA Abort Command (64 bytes)
+ */
+struct task_abort_req {
+       __le32  tag;
+       __le32  device_id;
+       __le32  tag_to_abort;
+       __le32  abort_all;
+       u32     reserved[27];
+} __attribute__((packed, aligned(4)));
+
+/* These flags used for SSP SMP & SATA Abort */
+#define ABORT_MASK             0x3
+#define ABORT_SINGLE           0x0
+#define ABORT_ALL              0x1
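A sketch (not the driver's exact code) of how a task abort IOMB might be built either for a single tag or for all I/O on a device, using the flags above; the usual kernel headers for memset and cpu_to_le32 are assumed:

	static void build_task_abort(struct task_abort_req *req, u32 tag,
				     u32 dev_id, u32 tag_to_abort, bool abort_all)
	{
		memset(req, 0, sizeof(*req));
		req->tag = cpu_to_le32(tag);
		req->device_id = cpu_to_le32(dev_id);
		if (abort_all)
			req->abort_all = cpu_to_le32(ABORT_ALL);
		else
			req->tag_to_abort = cpu_to_le32(tag_to_abort);
	}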
+
+/**
+ * brief the data structure of SSP SATA SMP Abort Response
+ * use to describe SSP SMP & SATA Abort Response ( 64 bytes)
+ */
+struct task_abort_resp {
+       __le32  tag;
+       __le32  status;
+       __le32  scp;
+       u32     reserved[12];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SAS Diagnostic Start/End Command
+ * use to describe MPI SAS Diagnostic Start/End Command (64 bytes)
+ */
+struct sas_diag_start_end_req {
+       __le32  tag;
+       __le32  operation_phyid;
+       u32     reserved[29];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SAS Diagnostic Execute Command
+ * use to describe MPI SAS Diagnostic Execute Command (64 bytes)
+ */
+struct sas_diag_execute_req {
+       __le32  tag;
+       __le32  cmdtype_cmddesc_phyid;
+       __le32  pat1_pat2;
+       __le32  threshold;
+       __le32  codepat_errmsk;
+       __le32  pmon;
+       __le32  pERF1CTL;
+       u32     reserved[24];
+} __attribute__((packed, aligned(4)));
+
+#define SAS_DIAG_PARAM_BYTES 24
+
+/*
+ * brief the data structure of Set Device State Command
+ * use to describe MPI Set Device State Command (64 bytes)
+ */
+struct set_dev_state_req {
+       __le32  tag;
+       __le32  device_id;
+       __le32  nds;
+       u32     reserved[28];
+} __attribute__((packed, aligned(4)));
+
+/*
+ * brief the data structure of SATA Start Command
+ * use to describe MPI SATA IO Start Command (64 bytes)
+ * Note: This structure is common for normal / encryption I/O
+ */
+
+struct sata_start_req {
+       __le32  tag;
+       __le32  device_id;
+       __le32  data_len;
+       __le32  ncqtag_atap_dir_m_dad;
+       struct host_to_dev_fis  sata_fis;
+       u32     reserved1;
+       u32     reserved2;      /* dword 11. rsvd for normal I/O. */
+                               /* EPLE Descl for enc I/O */
+       u32     addr_low;       /* dword 12. rsvd for enc I/O */
+       u32     addr_high;      /* dword 13. reserved for enc I/O */
+       __le32  len;            /* dword 14: length for normal I/O. */
+                               /* EPLE Desch for enc I/O */
+       __le32  esgl;           /* dword 15. rsvd for enc I/O */
+       __le32  atapi_scsi_cdb[4];      /* dword 16-19. rsvd for enc I/O */
+       /* The below fields are reserved for normal I/O */
+       __le32  key_index_mode; /* dword 20 */
+       __le32  sector_cnt_enss;/* dword 21 */
+       __le32  keytagl;        /* dword 22 */
+       __le32  keytagh;        /* dword 23 */
+       __le32  twk_val0;       /* dword 24 */
+       __le32  twk_val1;       /* dword 25 */
+       __le32  twk_val2;       /* dword 26 */
+       __le32  twk_val3;       /* dword 27 */
+       __le32  enc_addr_low;   /* dword 28. Encryption SGL address high */
+       __le32  enc_addr_high;  /* dword 29. Encryption SGL address low */
+       __le32  enc_len;        /* dword 30. Encryption length */
+       __le32  enc_esgl;       /* dword 31. Encryption esgl bit */
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SSP INI TM Start Command
+ * use to describe MPI SSP INI TM Start Command (64 bytes)
+ */
+struct ssp_ini_tm_start_req {
+       __le32  tag;
+       __le32  device_id;
+       __le32  relate_tag;
+       __le32  tmf;
+       u8      lun[8];
+       __le32  ds_ads_m;
+       u32     reserved[24];
+} __attribute__((packed, aligned(4)));
+
+struct ssp_info_unit {
+       u8      lun[8];/* SCSI Logical Unit Number */
+       u8      reserved1;/* reserved */
+       u8      efb_prio_attr;
+       /* B7 : enabledFirstBurst */
+       /* B6-3 : taskPriority */
+       /* B2-0 : taskAttribute */
+       u8      reserved2;      /* reserved */
+       u8      additional_cdb_len;
+       /* B7-2 : additional_cdb_len */
+       /* B1-0 : reserved */
+       u8      cdb[16];/* The SCSI CDB up to 16 bytes length */
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SSP INI IO Start Command
+ * use to describe MPI SSP INI IO Start Command (64 bytes)
+ * Note: This structure is common for normal / encryption I/O
+ */
+struct ssp_ini_io_start_req {
+       __le32  tag;
+       __le32  device_id;
+       __le32  data_len;
+       __le32  dad_dir_m_tlr;
+       struct ssp_info_unit    ssp_iu;
+       __le32  addr_low;       /* dword 12: sgl low for normal I/O. */
+                               /* epl_descl for encryption I/O */
+       __le32  addr_high;      /* dword 13: sgl hi for normal I/O */
+                               /* dpl_descl for encryption I/O */
+       __le32  len;            /* dword 14: len for normal I/O. */
+                               /* edpl_desch for encryption I/O */
+       __le32  esgl;           /* dword 15: ESGL bit for normal I/O. */
+                               /* user defined tag mask for enc I/O */
+       /* The below fields are reserved for normal I/O */
+       u8      udt[12];        /* dword 16-18 */
+       __le32  sectcnt_ios;    /* dword 19 */
+       __le32  key_cmode;      /* dword 20 */
+       __le32  ks_enss;        /* dword 21 */
+       __le32  keytagl;        /* dword 22 */
+       __le32  keytagh;        /* dword 23 */
+       __le32  twk_val0;       /* dword 24 */
+       __le32  twk_val1;       /* dword 25 */
+       __le32  twk_val2;       /* dword 26 */
+       __le32  twk_val3;       /* dword 27 */
+       __le32  enc_addr_low;   /* dword 28: Encryption sgl addr low */
+       __le32  enc_addr_high;  /* dword 29: Encryption sgl addr hi */
+       __le32  enc_len;        /* dword 30: Encryption length */
+       __le32  enc_esgl;       /* dword 31: ESGL bit for encryption */
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for SSP_INI_DIF_ENC_IO COMMAND
+ * use to initiate SSP I/O operation with optional DIF/ENC
+ */
+struct ssp_dif_enc_io_req {
+       __le32  tag;
+       __le32  device_id;
+       __le32  data_len;
+       __le32  dirMTlr;
+       __le32  sspiu0;
+       __le32  sspiu1;
+       __le32  sspiu2;
+       __le32  sspiu3;
+       __le32  sspiu4;
+       __le32  sspiu5;
+       __le32  sspiu6;
+       __le32  epl_des;
+       __le32  dpl_desl_ndplr;
+       __le32  dpl_desh;
+       __le32  uum_uuv_bss_difbits;
+       u8      udt[12];
+       __le32  sectcnt_ios;
+       __le32  key_cmode;
+       __le32  ks_enss;
+       __le32  keytagl;
+       __le32  keytagh;
+       __le32  twk_val0;
+       __le32  twk_val1;
+       __le32  twk_val2;
+       __le32  twk_val3;
+       __le32  addr_low;
+       __le32  addr_high;
+       __le32  len;
+       __le32  esgl;
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of Firmware download
+ * use to describe MPI FW DOWNLOAD Command (64 bytes)
+ */
+struct fw_flash_Update_req {
+       __le32  tag;
+       __le32  cur_image_offset;
+       __le32  cur_image_len;
+       __le32  total_image_len;
+       u32     reserved0[7];
+       __le32  sgl_addr_lo;
+       __le32  sgl_addr_hi;
+       __le32  len;
+       __le32  ext_reserved;
+       u32     reserved1[16];
+} __attribute__((packed, aligned(4)));
+
+#define FWFLASH_IOMB_RESERVED_LEN 0x07
+/**
+ * brief the data structure of FW_FLASH_UPDATE Response
+ * use to describe MPI FW_FLASH_UPDATE Response (64 bytes)
+ *
+ */
+ struct fw_flash_Update_resp {
+       __le32  tag;
+       __le32  status;
+       u32     reserved[13];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of Get NVM Data Command
+ * use to get data from NVM in HBA(64 bytes)
+ */
+struct get_nvm_data_req {
+       __le32  tag;
+       __le32  len_ir_vpdd;
+       __le32  vpd_offset;
+       u32     reserved[8];
+       __le32  resp_addr_lo;
+       __le32  resp_addr_hi;
+       __le32  resp_len;
+       u32     reserved1[17];
+} __attribute__((packed, aligned(4)));
+
+struct set_nvm_data_req {
+       __le32  tag;
+       __le32  len_ir_vpdd;
+       __le32  vpd_offset;
+       u32     reserved[8];
+       __le32  resp_addr_lo;
+       __le32  resp_addr_hi;
+       __le32  resp_len;
+       u32     reserved1[17];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for SET CONTROLLER CONFIG COMMAND
+ * use to modify controller configuration
+ */
+struct set_ctrl_cfg_req {
+       __le32  tag;
+       __le32  cfg_pg[14];
+       u32     reserved[16];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for GET CONTROLLER CONFIG COMMAND
+ * use to get controller configuration page
+ */
+struct get_ctrl_cfg_req {
+       __le32  tag;
+       __le32  pgcd;
+       __le32  int_vec;
+       u32     reserved[28];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for KEK_MANAGEMENT COMMAND
+ * use for KEK management
+ */
+struct kek_mgmt_req {
+       __le32  tag;
+       __le32  new_curidx_ksop;
+       u32     reserved;
+       __le32  kblob[12];
+       u32     reserved1[16];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for DEK_MANAGEMENT COMMAND
+ * use for DEK management
+ */
+struct dek_mgmt_req {
+       __le32  tag;
+       __le32  kidx_dsop;
+       __le32  dekidx;
+       __le32  addr_l;
+       __le32  addr_h;
+       __le32  nent;
+       __le32  dbf_tblsize;
+       u32     reserved[24];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for SET PHY PROFILE COMMAND
+ * use to set phy specific information
+ */
+struct set_phy_profile_req {
+       __le32  tag;
+       __le32  ppc_phyid;
+       u32     reserved[29];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for GET PHY PROFILE COMMAND
+ * use to retrieve phy specific information
+ */
+struct get_phy_profile_req {
+       __le32  tag;
+       __le32  ppc_phyid;
+       __le32  profile[29];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure for EXT FLASH PARTITION
+ * use to manage ext flash partition
+ */
+struct ext_flash_partition_req {
+       __le32  tag;
+       __le32  cmd;
+       __le32  offset;
+       __le32  len;
+       u32     reserved[7];
+       __le32  addr_low;
+       __le32  addr_high;
+       __le32  len1;
+       __le32  ext;
+       u32     reserved1[16];
+} __attribute__((packed, aligned(4)));
+
+#define TWI_DEVICE     0x0
+#define C_SEEPROM      0x1
+#define VPD_FLASH      0x4
+#define AAP1_RDUMP     0x5
+#define IOP_RDUMP      0x6
+#define EXPAN_ROM      0x7
+
+#define IPMode         0x80000000
+#define NVMD_TYPE      0x0000000F
+#define NVMD_STAT      0x0000FFFF
+#define NVMD_LEN       0xFF000000
+/**
+ * brief the data structure of Get NVMD Data Response
+ * use to describe MPI Get NVMD Data Response (64 bytes)
+ */
+struct get_nvm_data_resp {
+       __le32          tag;
+       __le32          ir_tda_bn_dps_das_nvm;
+       __le32          dlen_status;
+       __le32          nvm_data[12];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SAS Diagnostic Start/End Response
+ * use to describe MPI SAS Diagnostic Start/End Response (64 bytes)
+ *
+ */
+struct sas_diag_start_end_resp {
+       __le32          tag;
+       __le32          status;
+       u32             reserved[13];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of SAS Diagnostic Execute Response
+ * use to describe MPI SAS Diagnostic Execute Response (64 bytes)
+ *
+ */
+struct sas_diag_execute_resp {
+       __le32          tag;
+       __le32          cmdtype_cmddesc_phyid;
+       __le32          Status;
+       __le32          ReportData;
+       u32             reserved[11];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * brief the data structure of Set Device State Response
+ * use to describe MPI Set Device State Response (64 bytes)
+ *
+ */
+struct set_dev_state_resp {
+       __le32          tag;
+       __le32          status;
+       __le32          device_id;
+       __le32          pds_nds;
+       u32             reserved[11];
+} __attribute__((packed, aligned(4)));
+
+/* new outbound structure for spcv - begins */
+/**
+ * brief the data structure for SET CONTROLLER CONFIG COMMAND
+ * use to modify controller configuration
+ */
+struct set_ctrl_cfg_resp {
+       __le32 tag;
+       __le32 status;
+       __le32 err_qlfr_pgcd;
+       u32 reserved[12];
+} __attribute__((packed, aligned(4)));
+
+struct get_ctrl_cfg_resp {
+       __le32 tag;
+       __le32 status;
+       __le32 err_qlfr;
+       __le32 confg_page[12];
+} __attribute__((packed, aligned(4)));
+
+struct kek_mgmt_resp {
+       __le32 tag;
+       __le32 status;
+       __le32 kidx_new_curr_ksop;
+       __le32 err_qlfr;
+       u32 reserved[11];
+} __attribute__((packed, aligned(4)));
+
+struct dek_mgmt_resp {
+       __le32 tag;
+       __le32 status;
+       __le32 kekidx_tbls_dsop;
+       __le32 dekidx;
+       __le32 err_qlfr;
+       u32 reserved[10];
+} __attribute__((packed, aligned(4)));
+
+struct get_phy_profile_resp {
+       __le32 tag;
+       __le32 status;
+       __le32 ppc_phyid;
+       __le32 ppc_specific_rsp[12];
+} __attribute__((packed, aligned(4)));
+
+struct flash_op_ext_resp {
+       __le32 tag;
+       __le32 cmd;
+       __le32 status;
+       __le32 epart_size;
+       __le32 epart_sect_size;
+       u32 reserved[10];
+} __attribute__((packed, aligned(4)));
+
+struct set_phy_profile_resp {
+       __le32 tag;
+       __le32 status;
+       __le32 ppc_phyid;
+       __le32 ppc_specific_rsp[12];
+} __attribute__((packed, aligned(4)));
+
+struct ssp_coalesced_comp_resp {
+       __le32 coal_cnt;
+       __le32 tag0;
+       __le32 ssp_tag0;
+       __le32 tag1;
+       __le32 ssp_tag1;
+       __le32 add_tag_ssp_tag[10];
+} __attribute__((packed, aligned(4)));
+
+/* new outbound structure for spcv - ends */
+
+/* brief data structure for SAS protocol timer configuration page.
+ *
+ */
+struct SASProtocolTimerConfig {
+       __le32 pageCode;                        /* 0 */
+       __le32 MST_MSI;                         /* 1 */
+       __le32 STP_SSP_MCT_TMO;                 /* 2 */
+       __le32 STP_FRM_TMO;                     /* 3 */
+       __le32 STP_IDLE_TMO;                    /* 4 */
+       __le32 OPNRJT_RTRY_INTVL;               /* 5 */
+       __le32 Data_Cmd_OPNRJT_RTRY_TMO;        /* 6 */
+       __le32 Data_Cmd_OPNRJT_RTRY_THR;        /* 7 */
+       __le32 MAX_AIP;                         /* 8 */
+} __attribute__((packed, aligned(4)));
+
+typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
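Purely illustrative: the defines in the "SAS protocol timer configuration page" block above feed this page. The actual layout packs several values per dword, so treat the assignments below only as placeholders showing which constant belongs to which row:

	static void init_sas_timer_page(struct SASProtocolTimerConfig *pg)
	{
		memset(pg, 0, sizeof(*pg));
		pg->pageCode = cpu_to_le32(SAS_PROTOCOL_TIMER_CONFIG_PAGE);
		pg->OPNRJT_RTRY_INTVL = cpu_to_le32(SAS_OPNRJT_RTRY_INTVL);
		pg->Data_Cmd_OPNRJT_RTRY_TMO = cpu_to_le32(SAS_DOPNRJT_RTRY_TMO);
		pg->Data_Cmd_OPNRJT_RTRY_THR = cpu_to_le32(SAS_DOPNRJT_RTRY_THR);
		pg->MAX_AIP = cpu_to_le32(SAS_MAX_AIP);
	}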
+
+#define NDS_BITS 0x0F
+#define PDS_BITS 0xF0
+
+/*
+ * HW Events type
+ */
+
+#define HW_EVENT_RESET_START                   0x01
+#define HW_EVENT_CHIP_RESET_COMPLETE           0x02
+#define HW_EVENT_PHY_STOP_STATUS               0x03
+#define HW_EVENT_SAS_PHY_UP                    0x04
+#define HW_EVENT_SATA_PHY_UP                   0x05
+#define HW_EVENT_SATA_SPINUP_HOLD              0x06
+#define HW_EVENT_PHY_DOWN                      0x07
+#define HW_EVENT_PORT_INVALID                  0x08
+#define HW_EVENT_BROADCAST_CHANGE              0x09
+#define HW_EVENT_PHY_ERROR                     0x0A
+#define HW_EVENT_BROADCAST_SES                 0x0B
+#define HW_EVENT_INBOUND_CRC_ERROR             0x0C
+#define HW_EVENT_HARD_RESET_RECEIVED           0x0D
+#define HW_EVENT_MALFUNCTION                   0x0E
+#define HW_EVENT_ID_FRAME_TIMEOUT              0x0F
+#define HW_EVENT_BROADCAST_EXP                 0x10
+#define HW_EVENT_PHY_START_STATUS              0x11
+#define HW_EVENT_LINK_ERR_INVALID_DWORD                0x12
+#define HW_EVENT_LINK_ERR_DISPARITY_ERROR      0x13
+#define HW_EVENT_LINK_ERR_CODE_VIOLATION       0x14
+#define HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH  0x15
+#define HW_EVENT_LINK_ERR_PHY_RESET_FAILED     0x16
+#define HW_EVENT_PORT_RECOVERY_TIMER_TMO       0x17
+#define HW_EVENT_PORT_RECOVER                  0x18
+#define HW_EVENT_PORT_RESET_TIMER_TMO          0x19
+#define HW_EVENT_PORT_RESET_COMPLETE           0x20
+#define EVENT_BROADCAST_ASYNCH_EVENT           0x21
+
+/* port state */
+#define PORT_NOT_ESTABLISHED                   0x00
+#define PORT_VALID                             0x01
+#define PORT_LOSTCOMM                          0x02
+#define PORT_IN_RESET                          0x04
+#define PORT_3RD_PARTY_RESET                   0x07
+#define PORT_INVALID                           0x08
+
+/*
+ * SSP/SMP/SATA IO Completion Status values
+ */
+
+#define IO_SUCCESS                             0x00
+#define IO_ABORTED                             0x01
+#define IO_OVERFLOW                            0x02
+#define IO_UNDERFLOW                           0x03
+#define IO_FAILED                              0x04
+#define IO_ABORT_RESET                         0x05
+#define IO_NOT_VALID                           0x06
+#define IO_NO_DEVICE                           0x07
+#define IO_ILLEGAL_PARAMETER                   0x08
+#define IO_LINK_FAILURE                                0x09
+#define IO_PROG_ERROR                          0x0A
+
+#define IO_EDC_IN_ERROR                                0x0B
+#define IO_EDC_OUT_ERROR                       0x0C
+#define IO_ERROR_HW_TIMEOUT                    0x0D
+#define IO_XFER_ERROR_BREAK                    0x0E
+#define IO_XFER_ERROR_PHY_NOT_READY            0x0F
+#define IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED       0x10
+#define IO_OPEN_CNX_ERROR_ZONE_VIOLATION               0x11
+#define IO_OPEN_CNX_ERROR_BREAK                                0x12
+#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS                        0x13
+#define IO_OPEN_CNX_ERROR_BAD_DESTINATION              0x14
+#define IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED        0x15
+#define IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY           0x16
+#define IO_OPEN_CNX_ERROR_WRONG_DESTINATION            0x17
+/* This error code 0x18 is not used on SPCv */
+#define IO_OPEN_CNX_ERROR_UNKNOWN_ERROR                        0x18
+#define IO_XFER_ERROR_NAK_RECEIVED                     0x19
+#define IO_XFER_ERROR_ACK_NAK_TIMEOUT                  0x1A
+#define IO_XFER_ERROR_PEER_ABORTED                     0x1B
+#define IO_XFER_ERROR_RX_FRAME                         0x1C
+#define IO_XFER_ERROR_DMA                              0x1D
+#define IO_XFER_ERROR_CREDIT_TIMEOUT                   0x1E
+#define IO_XFER_ERROR_SATA_LINK_TIMEOUT                        0x1F
+#define IO_XFER_ERROR_SATA                             0x20
+
+/* This error code 0x22 is not used on SPCv */
+#define IO_XFER_ERROR_ABORTED_DUE_TO_SRST              0x22
+#define IO_XFER_ERROR_REJECTED_NCQ_MODE                        0x21
+#define IO_XFER_ERROR_ABORTED_NCQ_MODE                 0x23
+#define IO_XFER_OPEN_RETRY_TIMEOUT                     0x24
+/* This error code 0x25 is not used on SPCv */
+#define IO_XFER_SMP_RESP_CONNECTION_ERROR              0x25
+#define IO_XFER_ERROR_UNEXPECTED_PHASE                 0x26
+#define IO_XFER_ERROR_XFER_RDY_OVERRUN                 0x27
+#define IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED            0x28
+#define IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT                0x30
+
+/* The following error codes 0x31 and 0x32 are not used (obsolete) */
+#define IO_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NAK   0x31
+#define IO_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK        0x32
+
+#define IO_XFER_ERROR_OFFSET_MISMATCH                  0x34
+#define IO_XFER_ERROR_XFER_ZERO_DATA_LEN               0x35
+#define IO_XFER_CMD_FRAME_ISSUED                       0x36
+#define IO_ERROR_INTERNAL_SMP_RESOURCE                 0x37
+#define IO_PORT_IN_RESET                               0x38
+#define IO_DS_NON_OPERATIONAL                          0x39
+#define IO_DS_IN_RECOVERY                              0x3A
+#define IO_TM_TAG_NOT_FOUND                            0x3B
+#define IO_XFER_PIO_SETUP_ERROR                                0x3C
+#define IO_SSP_EXT_IU_ZERO_LEN_ERROR                   0x3D
+#define IO_DS_IN_ERROR                                 0x3E
+#define IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY             0x3F
+#define IO_ABORT_IN_PROGRESS                           0x40
+#define IO_ABORT_DELAYED                               0x41
+#define IO_INVALID_LENGTH                              0x42
+
+/********** additional response event values *****************/
+
+#define IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY_ALT         0x43
+#define IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED   0x44
+#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO       0x45
+#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST                0x46
+#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE   0x47
+#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED        0x48
+#define IO_DS_INVALID                                  0x49
+/* WARNING: the value is not contiguous from here */
+#define IO_XFER_ERR_LAST_PIO_DATAIN_CRC_ERR    0x52
+#define IO_XFER_DMA_ACTIVATE_TIMEOUT           0x53
+#define IO_XFER_ERROR_INTERNAL_CRC_ERROR       0x54
+#define MPI_IO_RQE_BUSY_FULL                   0x55
+#define IO_XFER_ERR_EOB_DATA_OVERRUN           0x56
+#define IO_XFR_ERROR_INVALID_SSP_RSP_FRAME     0x57
+#define IO_OPEN_CNX_ERROR_OPEN_PREEMPTED       0x58
+
+#define MPI_ERR_IO_RESOURCE_UNAVAILABLE                0x1004
+#define MPI_ERR_ATAPI_DEVICE_BUSY              0x1024
+
+#define IO_XFR_ERROR_DEK_KEY_CACHE_MISS                0x2040
+/*
+ * An encryption IO request failed due to DEK Key Tag mismatch.
+ * The key tag supplied in the encryption IOMB does not match with
+ * the Key Tag in the referenced DEK Entry.
+ */
+#define IO_XFR_ERROR_DEK_KEY_TAG_MISMATCH      0x2041
+#define IO_XFR_ERROR_CIPHER_MODE_INVALID       0x2042
+/*
+ * An encryption I/O request failed because the initial value (IV)
+ * in the unwrapped DEK blob didn't match the IV used to unwrap it.
+ */
+#define IO_XFR_ERROR_DEK_IV_MISMATCH           0x2043
+/* An encryption I/O request failed due to an internal RAM ECC or
+ * interface error while unwrapping the DEK. */
+#define IO_XFR_ERROR_DEK_RAM_INTERFACE_ERROR   0x2044
+/* An encryption I/O request failed due to an internal RAM ECC or
+ * interface error while unwrapping the DEK. */
+#define IO_XFR_ERROR_INTERNAL_RAM              0x2045
+/*
+ * An encryption I/O request failed
+ * because the DEK index specified in the I/O was outside the bounds of
+ * the total number of entries in the host DEK table.
+ */
+#define IO_XFR_ERROR_DEK_INDEX_OUT_OF_BOUNDS   0x2046
+
+/* define DIF IO response error status code */
+#define IO_XFR_ERROR_DIF_MISMATCH                      0x3000
+#define IO_XFR_ERROR_DIF_APPLICATION_TAG_MISMATCH      0x3001
+#define IO_XFR_ERROR_DIF_REFERENCE_TAG_MISMATCH                0x3002
+#define IO_XFR_ERROR_DIF_CRC_MISMATCH                  0x3003
+
+/* define operator management response status and error qualifier code */
+#define OPR_MGMT_OP_NOT_SUPPORTED                      0x2060
+#define OPR_MGMT_MPI_ENC_ERR_OPR_PARAM_ILLEGAL         0x2061
+#define OPR_MGMT_MPI_ENC_ERR_OPR_ID_NOT_FOUND          0x2062
+#define OPR_MGMT_MPI_ENC_ERR_OPR_ROLE_NOT_MATCH                0x2063
+#define OPR_MGMT_MPI_ENC_ERR_OPR_MAX_NUM_EXCEEDED      0x2064
+#define OPR_MGMT_MPI_ENC_ERR_KEK_UNWRAP_FAIL           0x2022
+#define OPR_MGMT_MPI_ENC_ERR_NVRAM_OPERATION_FAILURE   0x2023
+/***************** additional response event values ***************/
+
+/* WARNING: This error code must always be the last number.
+ * If you add an error code, update this value as well,
+ * since it is used as an index.
+ */
+#define IO_ERROR_UNKNOWN_GENERIC                       0x2023
+
+/* MSGU CONFIGURATION TABLE*/
+
+#define SPCv_MSGU_CFG_TABLE_UPDATE             0x01
+#define SPCv_MSGU_CFG_TABLE_RESET              0x02
+#define SPCv_MSGU_CFG_TABLE_FREEZE             0x04
+#define SPCv_MSGU_CFG_TABLE_UNFREEZE           0x08
+#define MSGU_IBDB_SET                          0x00
+#define MSGU_HOST_INT_STATUS                   0x08
+#define MSGU_HOST_INT_MASK                     0x0C
+#define MSGU_IOPIB_INT_STATUS                  0x18
+#define MSGU_IOPIB_INT_MASK                    0x1C
+#define MSGU_IBDB_CLEAR                                0x20
+
+#define MSGU_MSGU_CONTROL                      0x24
+#define MSGU_ODR                               0x20
+#define MSGU_ODCR                              0x28
+
+#define MSGU_ODMR                              0x30
+#define MSGU_ODMR_U                            0x34
+#define MSGU_ODMR_CLR                          0x38
+#define MSGU_ODMR_CLR_U                                0x3C
+#define MSGU_OD_RSVD                           0x40
+
+#define MSGU_SCRATCH_PAD_0                     0x44
+#define MSGU_SCRATCH_PAD_1                     0x48
+#define MSGU_SCRATCH_PAD_2                     0x4C
+#define MSGU_SCRATCH_PAD_3                     0x50
+#define MSGU_HOST_SCRATCH_PAD_0                        0x54
+#define MSGU_HOST_SCRATCH_PAD_1                        0x58
+#define MSGU_HOST_SCRATCH_PAD_2                        0x5C
+#define MSGU_HOST_SCRATCH_PAD_3                        0x60
+#define MSGU_HOST_SCRATCH_PAD_4                        0x64
+#define MSGU_HOST_SCRATCH_PAD_5                        0x68
+#define MSGU_HOST_SCRATCH_PAD_6                        0x6C
+#define MSGU_HOST_SCRATCH_PAD_7                        0x70
+
+/* bit definition for ODMR register */
+#define ODMR_MASK_ALL                  0xFFFFFFFF/* mask all
+                                       interrupt vector */
+#define ODMR_CLEAR_ALL                 0       /* clear all
+                                       interrupt vector */
+/* bit definition for ODCR register */
+#define ODCR_CLEAR_ALL                 0xFFFFFFFF /* mask all
+                                       interrupt vector*/
+/* MSIX Interrupts */
+#define MSIX_TABLE_OFFSET              0x2000
+#define MSIX_TABLE_ELEMENT_SIZE                0x10
+#define MSIX_INTERRUPT_CONTROL_OFFSET  0xC
+#define MSIX_TABLE_BASE                        (MSIX_TABLE_OFFSET + \
+                                       MSIX_INTERRUPT_CONTROL_OFFSET)
+#define MSIX_INTERRUPT_DISABLE         0x1
+#define MSIX_INTERRUPT_ENABLE          0x0
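Sketch of the arithmetic implied by the MSI-X constants above: the interrupt-control dword for vector 'vec' sits at MSIX_TABLE_BASE plus one table element per vector, and writing MSIX_INTERRUPT_DISABLE there would mask that vector (helper name is hypothetical):

	static inline u32 msix_int_ctl_offset(u32 vec)
	{
		return MSIX_TABLE_BASE + vec * MSIX_TABLE_ELEMENT_SIZE;
	}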
+
+/* state definition for Scratch Pad1 register */
+#define SCRATCH_PAD_RAAE_READY         0x3
+#define SCRATCH_PAD_ILA_READY          0xC
+#define SCRATCH_PAD_BOOT_LOAD_SUCCESS  0x0
+#define SCRATCH_PAD_IOP0_READY         0xC00
+#define SCRATCH_PAD_IOP1_READY         0x3000
+
+/* boot loader state */
+#define SCRATCH_PAD1_BOOTSTATE_MASK            0x70    /* Bit 4-6 */
+#define SCRATCH_PAD1_BOOTSTATE_SUCESS          0x0     /* Load successful */
+#define SCRATCH_PAD1_BOOTSTATE_HDA_SEEPROM     0x10    /* HDA SEEPROM */
+#define SCRATCH_PAD1_BOOTSTATE_HDA_BOOTSTRAP   0x20    /* HDA BootStrap Pins */
+#define SCRATCH_PAD1_BOOTSTATE_HDA_SOFTRESET   0x30    /* HDA Soft Reset */
+#define SCRATCH_PAD1_BOOTSTATE_CRIT_ERROR      0x40    /* HDA critical error */
+#define SCRATCH_PAD1_BOOTSTATE_R1              0x50    /* Reserved */
+#define SCRATCH_PAD1_BOOTSTATE_R2              0x60    /* Reserved */
+#define SCRATCH_PAD1_BOOTSTATE_FATAL           0x70    /* Fatal Error */
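Illustration (helper name is hypothetical): the boot-loader state occupies bits 4-6 of scratch pad 1, so a successful load can be tested as:

	static inline bool spcv_boot_load_ok(u32 scratch_pad1)
	{
		return (scratch_pad1 & SCRATCH_PAD1_BOOTSTATE_MASK) ==
			SCRATCH_PAD1_BOOTSTATE_SUCESS;
	}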
+
+ /* state definition for Scratch Pad2 register */
+#define SCRATCH_PAD2_POR               0x00    /* power on state */
+#define SCRATCH_PAD2_SFR               0x01    /* soft reset state */
+#define SCRATCH_PAD2_ERR               0x02    /* error state */
+#define SCRATCH_PAD2_RDY               0x03    /* ready state */
+#define SCRATCH_PAD2_FWRDY_RST         0x04    /* FW rdy for soft reset flag */
+#define SCRATCH_PAD2_IOPRDY_RST                0x08    /* IOP ready for soft reset */
+#define SCRATCH_PAD2_STATE_MASK                0xFFFFFFF4 /* ScratchPad 2
+ Mask, bit1-0 State */
+#define SCRATCH_PAD2_RESERVED          0x000003FC /* Scratch Pad2
+ Reserved bit 2 to 9 */
+
+#define SCRATCH_PAD_ERROR_MASK         0xFFFFFC00 /* Error mask bits */
+#define SCRATCH_PAD_STATE_MASK         0x00000003 /* State Mask bits */
+
+/* main configuration offset - byte offset */
+#define MAIN_SIGNATURE_OFFSET          0x00 /* DWORD 0x00 */
+#define MAIN_INTERFACE_REVISION                0x04 /* DWORD 0x01 */
+#define MAIN_FW_REVISION               0x08 /* DWORD 0x02 */
+#define MAIN_MAX_OUTSTANDING_IO_OFFSET 0x0C /* DWORD 0x03 */
+#define MAIN_MAX_SGL_OFFSET            0x10 /* DWORD 0x04 */
+#define MAIN_CNTRL_CAP_OFFSET          0x14 /* DWORD 0x05 */
+#define MAIN_GST_OFFSET                        0x18 /* DWORD 0x06 */
+#define MAIN_IBQ_OFFSET                        0x1C /* DWORD 0x07 */
+#define MAIN_OBQ_OFFSET                        0x20 /* DWORD 0x08 */
+#define MAIN_IQNPPD_HPPD_OFFSET                0x24 /* DWORD 0x09 */
+
+/* 0x28 - 0x4C - RSVD */
+#define MAIN_EVENT_CRC_CHECK           0x48 /* DWORD 0x12 */
+#define MAIN_EVENT_LOG_ADDR_HI         0x50 /* DWORD 0x14 */
+#define MAIN_EVENT_LOG_ADDR_LO         0x54 /* DWORD 0x15 */
+#define MAIN_EVENT_LOG_BUFF_SIZE       0x58 /* DWORD 0x16 */
+#define MAIN_EVENT_LOG_OPTION          0x5C /* DWORD 0x17 */
+#define MAIN_PCS_EVENT_LOG_ADDR_HI     0x60 /* DWORD 0x18 */
+#define MAIN_PCS_EVENT_LOG_ADDR_LO     0x64 /* DWORD 0x19 */
+#define MAIN_PCS_EVENT_LOG_BUFF_SIZE   0x68 /* DWORD 0x1A */
+#define MAIN_PCS_EVENT_LOG_OPTION      0x6C /* DWORD 0x1B */
+#define MAIN_FATAL_ERROR_INTERRUPT     0x70 /* DWORD 0x1C */
+#define MAIN_FATAL_ERROR_RDUMP0_OFFSET 0x74 /* DWORD 0x1D */
+#define MAIN_FATAL_ERROR_RDUMP0_LENGTH 0x78 /* DWORD 0x1E */
+#define MAIN_FATAL_ERROR_RDUMP1_OFFSET 0x7C /* DWORD 0x1F */
+#define MAIN_FATAL_ERROR_RDUMP1_LENGTH 0x80 /* DWORD 0x20 */
+#define MAIN_GPIO_LED_FLAGS_OFFSET     0x84 /* DWORD 0x21 */
+#define MAIN_ANALOG_SETUP_OFFSET       0x88 /* DWORD 0x22 */
+
+#define MAIN_INT_VECTOR_TABLE_OFFSET   0x8C /* DWORD 0x23 */
+#define MAIN_SAS_PHY_ATTR_TABLE_OFFSET 0x90 /* DWORD 0x24 */
+#define MAIN_PORT_RECOVERY_TIMER       0x94 /* DWORD 0x25 */
+#define MAIN_INT_REASSERTION_DELAY     0x98 /* DWORD 0x26 */
+
+/* General Status Table offset - byte offset */
+#define GST_GSTLEN_MPIS_OFFSET         0x00
+#define GST_IQ_FREEZE_STATE0_OFFSET    0x04
+#define GST_IQ_FREEZE_STATE1_OFFSET    0x08
+#define GST_MSGUTCNT_OFFSET            0x0C
+#define GST_IOPTCNT_OFFSET             0x10
+/* 0x14 - 0x34 - RSVD */
+#define GST_GPIO_INPUT_VAL             0x38
+/* 0x3c - 0x40 - RSVD */
+#define GST_RERRINFO_OFFSET0           0x44
+#define GST_RERRINFO_OFFSET1           0x48
+#define GST_RERRINFO_OFFSET2           0x4c
+#define GST_RERRINFO_OFFSET3           0x50
+#define GST_RERRINFO_OFFSET4           0x54
+#define GST_RERRINFO_OFFSET5           0x58
+#define GST_RERRINFO_OFFSET6           0x5c
+#define GST_RERRINFO_OFFSET7           0x60
+
+/* General Status Table - MPI state */
+#define GST_MPI_STATE_UNINIT           0x00
+#define GST_MPI_STATE_INIT             0x01
+#define GST_MPI_STATE_TERMINATION      0x02
+#define GST_MPI_STATE_ERROR            0x03
+#define GST_MPI_STATE_MASK             0x07
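Sketch only: the MPI state is kept in the low bits of the GSTLEN_MPIS dword of the General Status Table, so it can be extracted with the mask above:

	static inline u32 gst_mpi_state(u32 gstlen_mpis)
	{
		return gstlen_mpis & GST_MPI_STATE_MASK;
	}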
+
+/* Per SAS PHY Attributes */
+
+#define PSPA_PHYSTATE0_OFFSET          0x00 /* Dword V */
+#define PSPA_OB_HW_EVENT_PID0_OFFSET   0x04 /* DWORD V+1 */
+#define PSPA_PHYSTATE1_OFFSET          0x08 /* Dword V+2 */
+#define PSPA_OB_HW_EVENT_PID1_OFFSET   0x0C /* DWORD V+3 */
+#define PSPA_PHYSTATE2_OFFSET          0x10 /* Dword V+4 */
+#define PSPA_OB_HW_EVENT_PID2_OFFSET   0x14 /* DWORD V+5 */
+#define PSPA_PHYSTATE3_OFFSET          0x18 /* Dword V+6 */
+#define PSPA_OB_HW_EVENT_PID3_OFFSET   0x1C /* DWORD V+7 */
+#define PSPA_PHYSTATE4_OFFSET          0x20 /* Dword V+8 */
+#define PSPA_OB_HW_EVENT_PID4_OFFSET   0x24 /* DWORD V+9 */
+#define PSPA_PHYSTATE5_OFFSET          0x28 /* Dword V+10 */
+#define PSPA_OB_HW_EVENT_PID5_OFFSET   0x2C /* DWORD V+11 */
+#define PSPA_PHYSTATE6_OFFSET          0x30 /* Dword V+12 */
+#define PSPA_OB_HW_EVENT_PID6_OFFSET   0x34 /* DWORD V+13 */
+#define PSPA_PHYSTATE7_OFFSET          0x38 /* Dword V+14 */
+#define PSPA_OB_HW_EVENT_PID7_OFFSET   0x3C /* DWORD V+15 */
+#define PSPA_PHYSTATE8_OFFSET          0x40 /* DWORD V+16 */
+#define PSPA_OB_HW_EVENT_PID8_OFFSET   0x44 /* DWORD V+17 */
+#define PSPA_PHYSTATE9_OFFSET          0x48 /* DWORD V+18 */
+#define PSPA_OB_HW_EVENT_PID9_OFFSET   0x4C /* DWORD V+19 */
+#define PSPA_PHYSTATE10_OFFSET         0x50 /* DWORD V+20 */
+#define PSPA_OB_HW_EVENT_PID10_OFFSET  0x54 /* DWORD V+21 */
+#define PSPA_PHYSTATE11_OFFSET         0x58 /* DWORD V+22 */
+#define PSPA_OB_HW_EVENT_PID11_OFFSET  0x5C /* DWORD V+23 */
+#define PSPA_PHYSTATE12_OFFSET         0x60 /* DWORD V+24 */
+#define PSPA_OB_HW_EVENT_PID12_OFFSET  0x64 /* DWORD V+25 */
+#define PSPA_PHYSTATE13_OFFSET         0x68 /* DWORD V+26 */
+#define PSPA_OB_HW_EVENT_PID13_OFFSET  0x6c /* DWORD V+27 */
+#define PSPA_PHYSTATE14_OFFSET         0x70 /* DWORD V+28 */
+#define PSPA_OB_HW_EVENT_PID14_OFFSET  0x74 /* DWORD V+29 */
+#define PSPA_PHYSTATE15_OFFSET         0x78 /* DWORD V+30 */
+#define PSPA_OB_HW_EVENT_PID15_OFFSET  0x7c /* DWORD V+31 */
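The per-phy attribute entries above advance by 8 bytes per phy, so a generic accessor could compute the offsets instead of spelling out the per-phy defines (sketch, helper names hypothetical):

	static inline u32 pspa_phystate_offset(u32 phy_id)
	{
		return PSPA_PHYSTATE0_OFFSET + phy_id * 8;
	}

	static inline u32 pspa_ob_hw_event_pid_offset(u32 phy_id)
	{
		return PSPA_OB_HW_EVENT_PID0_OFFSET + phy_id * 8;
	}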
+/* end PSPA */
+
+/* inbound queue configuration offset - byte offset */
+#define IB_PROPERITY_OFFSET            0x00
+#define IB_BASE_ADDR_HI_OFFSET         0x04
+#define IB_BASE_ADDR_LO_OFFSET         0x08
+#define IB_CI_BASE_ADDR_HI_OFFSET      0x0C
+#define IB_CI_BASE_ADDR_LO_OFFSET      0x10
+#define IB_PIPCI_BAR                   0x14
+#define IB_PIPCI_BAR_OFFSET            0x18
+#define IB_RESERVED_OFFSET             0x1C
+
+/* outbound queue configuration offset - byte offset */
+#define OB_PROPERITY_OFFSET            0x00
+#define OB_BASE_ADDR_HI_OFFSET         0x04
+#define OB_BASE_ADDR_LO_OFFSET         0x08
+#define OB_PI_BASE_ADDR_HI_OFFSET      0x0C
+#define OB_PI_BASE_ADDR_LO_OFFSET      0x10
+#define OB_CIPCI_BAR                   0x14
+#define OB_CIPCI_BAR_OFFSET            0x18
+#define OB_INTERRUPT_COALES_OFFSET     0x1C
+#define OB_DYNAMIC_COALES_OFFSET       0x20
+#define OB_PROPERTY_INT_ENABLE         0x40000000
+
+#define MBIC_NMI_ENABLE_VPE0_IOP       0x000418
+#define MBIC_NMI_ENABLE_VPE0_AAP1      0x000418
+/* PCIE registers - BAR2(0x18), BAR1(win) 0x010000 */
+#define PCIE_EVENT_INTERRUPT_ENABLE    0x003040
+#define PCIE_EVENT_INTERRUPT           0x003044
+#define PCIE_ERROR_INTERRUPT_ENABLE    0x003048
+#define PCIE_ERROR_INTERRUPT           0x00304C
+
+/* SPCV soft reset */
+#define SPC_REG_SOFT_RESET 0x00001000
+#define SPCv_NORMAL_RESET_VALUE                0x1
+
+#define SPCv_SOFT_RESET_READ_MASK              0xC0
+#define SPCv_SOFT_RESET_NO_RESET               0x0
+#define SPCv_SOFT_RESET_NORMAL_RESET_OCCURED   0x40
+#define SPCv_SOFT_RESET_HDA_MODE_OCCURED       0x80
+#define SPCv_SOFT_RESET_CHIP_RESET_OCCURED     0xC0
+
+/* signature definition for host scratch pad0 register */
+#define SPC_SOFT_RESET_SIGNATURE       0x252acbcd
+/* Signature for Soft Reset */
+
+/* SPC Reset register - BAR4(0x20), BAR2(win) (need dynamic mapping) */
+#define SPC_REG_RESET                  0x000000/* reset register */
+
+/* bit definition for SPC_RESET register */
+#define SPC_REG_RESET_OSSP             0x00000001
+#define SPC_REG_RESET_RAAE             0x00000002
+#define SPC_REG_RESET_PCS_SPBC         0x00000004
+#define SPC_REG_RESET_PCS_IOP_SS       0x00000008
+#define SPC_REG_RESET_PCS_AAP1_SS      0x00000010
+#define SPC_REG_RESET_PCS_AAP2_SS      0x00000020
+#define SPC_REG_RESET_PCS_LM           0x00000040
+#define SPC_REG_RESET_PCS              0x00000080
+#define SPC_REG_RESET_GSM              0x00000100
+#define SPC_REG_RESET_DDR2             0x00010000
+#define SPC_REG_RESET_BDMA_CORE                0x00020000
+#define SPC_REG_RESET_BDMA_SXCBI       0x00040000
+#define SPC_REG_RESET_PCIE_AL_SXCBI    0x00080000
+#define SPC_REG_RESET_PCIE_PWR         0x00100000
+#define SPC_REG_RESET_PCIE_SFT         0x00200000
+#define SPC_REG_RESET_PCS_SXCBI                0x00400000
+#define SPC_REG_RESET_LMS_SXCBI                0x00800000
+#define SPC_REG_RESET_PMIC_SXCBI       0x01000000
+#define SPC_REG_RESET_PMIC_CORE                0x02000000
+#define SPC_REG_RESET_PCIE_PC_SXCBI    0x04000000
+#define SPC_REG_RESET_DEVICE           0x80000000
+
+/* registers for BAR Shifting - BAR2(0x18), BAR1(win) */
+#define SPCV_IBW_AXI_TRANSLATION_LOW   0x001010
+
+#define MBIC_AAP1_ADDR_BASE            0x060000
+#define MBIC_IOP_ADDR_BASE             0x070000
+#define GSM_ADDR_BASE                  0x0700000
+/* Dynamic map through Bar4 - 0x00700000 */
+#define GSM_CONFIG_RESET               0x00000000
+#define RAM_ECC_DB_ERR                 0x00000018
+#define GSM_READ_ADDR_PARITY_INDIC     0x00000058
+#define GSM_WRITE_ADDR_PARITY_INDIC    0x00000060
+#define GSM_WRITE_DATA_PARITY_INDIC    0x00000068
+#define GSM_READ_ADDR_PARITY_CHECK     0x00000038
+#define GSM_WRITE_ADDR_PARITY_CHECK    0x00000040
+#define GSM_WRITE_DATA_PARITY_CHECK    0x00000048
+
+#define RB6_ACCESS_REG                 0x6A0000
+#define HDAC_EXEC_CMD                  0x0002
+#define HDA_C_PA                       0xcb
+#define HDA_SEQ_ID_BITS                        0x00ff0000
+#define HDA_GSM_OFFSET_BITS            0x00FFFFFF
+#define HDA_GSM_CMD_OFFSET_BITS                0x42C0
+#define HDA_GSM_RSP_OFFSET_BITS                0x42E0
+
+#define MBIC_AAP1_ADDR_BASE            0x060000
+#define MBIC_IOP_ADDR_BASE             0x070000
+#define GSM_ADDR_BASE                  0x0700000
+#define SPC_TOP_LEVEL_ADDR_BASE                0x000000
+#define GSM_CONFIG_RESET_VALUE         0x00003b00
+#define GPIO_ADDR_BASE                 0x00090000
+#define GPIO_GPIO_0_0UTPUT_CTL_OFFSET  0x0000010c
+
+/* RB6 offset */
+#define SPC_RB6_OFFSET                 0x80C0
+/* Magic number of soft reset for RB6 */
+#define RB6_MAGIC_NUMBER_RST           0x1234
+
+/* Device Register status */
+#define DEVREG_SUCCESS                                 0x00
+#define DEVREG_FAILURE_OUT_OF_RESOURCE                 0x01
+#define DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED       0x02
+#define DEVREG_FAILURE_INVALID_PHY_ID                  0x03
+#define DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED       0x04
+#define DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE            0x05
+#define DEVREG_FAILURE_PORT_NOT_VALID_STATE            0x06
+#define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID           0x07
+
+#endif
index 317a7fdc3b825064e4a5677f0a64bc5f3e43a8d6..23d607218ae8b8a27e07b51d3752bfae17bbba26 100644 (file)
@@ -24,7 +24,9 @@ config SCSI_QLA_FC
 
        Firmware images can be retrieved from:
 
-               ftp://ftp.qlogic.com/outgoing/linux/firmware/
+               http://ldriver.qlogic.com/firmware/
+
+       They are also included in the linux-firmware tree.
 
 config TCM_QLA2XXX
        tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs"
index 729b74389f83168544fb4ef95afe2e9464193ba6..937fed8cb0388550b619dfae667d452fa1c686a7 100644 (file)
@@ -3003,12 +3003,10 @@ qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
 
        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
-               lcmd_pkt->cntrl_flags =
-                   __constant_cpu_to_le16(TMF_WRITE_DATA);
+               lcmd_pkt->cntrl_flags = TMF_WRITE_DATA;
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
-               lcmd_pkt->cntrl_flags =
-                   __constant_cpu_to_le16(TMF_READ_DATA);
+               lcmd_pkt->cntrl_flags = TMF_READ_DATA;
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
        }
 
index 5307bf86d5e08d453be2f8d3bd3ca781607b640c..ad72c1d8511162b9465b96265510bdad6dfd1e42 100644 (file)
@@ -644,7 +644,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr)
        qla2x00_rel_sp(sp->fcport->vha, sp);
 }
 
-void
+static void
 qla2x00_sp_compl(void *data, void *ptr, int res)
 {
        struct qla_hw_data *ha = (struct qla_hw_data *)data;
index 14fec976f634e1c4804855617c65aae7fcd4a566..fad71ed067ec3525a742fe251634fa429bb781f8 100644 (file)
@@ -507,6 +507,7 @@ static int qla4xxx_send_mbox_iocb(struct scsi_qla_host *ha, struct mrb *mrb,
        mrb->mbox_cmd = in_mbox[0];
        wmb();
 
+       ha->iocb_cnt += mrb->iocb_cnt;
        ha->isp_ops->queue_iocb(ha);
 exit_mbox_iocb:
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
index a47f99957ba8eb9c37d6e4b46ec25cfe763c24a8..4d231c12463eb38ad29d5b3c4ca25f4af24d99fa 100644 (file)
@@ -2216,14 +2216,14 @@ static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess,
        fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain);
        fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt);
        fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size);
-       fw_ddb_entry->tcp_xmt_wsf = cpu_to_le16(conn->tcp_xmit_wsf);
-       fw_ddb_entry->tcp_rcv_wsf = cpu_to_le16(conn->tcp_recv_wsf);
+       fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf);
+       fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf);
        fw_ddb_entry->ipv4_tos = conn->ipv4_tos;
        fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label);
        fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout);
        fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port);
-       fw_ddb_entry->stat_sn = cpu_to_le16(conn->statsn);
-       fw_ddb_entry->exp_stat_sn = cpu_to_le16(conn->exp_statsn);
+       fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn);
+       fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn);
        fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_type);
        fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx);
        fw_ddb_entry->tsid = cpu_to_le16(sess->tsid);
@@ -5504,9 +5504,9 @@ static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data)
  * If this is invoked as a result of a userspace call then the entry is marked
  * as nonpersistent using flash_state field.
  **/
-int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
-                                struct dev_db_entry *fw_ddb_entry,
-                                uint16_t *idx, int user)
+static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
+                                       struct dev_db_entry *fw_ddb_entry,
+                                       uint16_t *idx, int user)
 {
        struct iscsi_bus_flash_session *fnode_sess = NULL;
        struct iscsi_bus_flash_conn *fnode_conn = NULL;
@@ -5605,6 +5605,7 @@ static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
                ql4_printk(KERN_ERR, ha,
                           "%s: A non-persistent entry %s found\n",
                           __func__, dev->kobj.name);
+               put_device(dev);
                goto exit_ddb_add;
        }
 
@@ -6112,8 +6113,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
        int parent_type, parent_index = 0xffff;
        int rc = 0;
 
-       dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
-                                       iscsi_is_flashnode_conn_dev);
+       dev = iscsi_find_flashnode_conn(fnode_sess);
        if (!dev)
                return -EIO;
 
@@ -6276,8 +6276,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
                        rc = sprintf(buf, "\n");
                break;
        case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
-               if ((fnode_sess->discovery_parent_idx) >= 0  &&
-                   (fnode_sess->discovery_parent_idx < MAX_DDB_ENTRIES))
+               if (fnode_sess->discovery_parent_idx < MAX_DDB_ENTRIES)
                        parent_index = fnode_sess->discovery_parent_idx;
 
                rc = sprintf(buf, "%u\n", parent_index);
@@ -6287,8 +6286,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
                        parent_type = ISCSI_DISC_PARENT_ISNS;
                else if (fnode_sess->discovery_parent_type == DDB_NO_LINK)
                        parent_type = ISCSI_DISC_PARENT_UNKNOWN;
-               else if (fnode_sess->discovery_parent_type >= 0  &&
-                        fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES)
+               else if (fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES)
                        parent_type = ISCSI_DISC_PARENT_SENDTGT;
                else
                        parent_type = ISCSI_DISC_PARENT_UNKNOWN;
@@ -6349,6 +6347,8 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
                rc = -ENOSYS;
                break;
        }
+
+       put_device(dev);
        return rc;
 }
 
@@ -6368,20 +6368,11 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
 {
        struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
        struct scsi_qla_host *ha = to_qla_host(shost);
-       struct dev_db_entry *fw_ddb_entry = NULL;
        struct iscsi_flashnode_param_info *fnode_param;
        struct nlattr *attr;
        int rc = QLA_ERROR;
        uint32_t rem = len;
 
-       fw_ddb_entry = kzalloc(sizeof(*fw_ddb_entry), GFP_KERNEL);
-       if (!fw_ddb_entry) {
-               DEBUG2(ql4_printk(KERN_ERR, ha,
-                                 "%s: Unable to allocate ddb buffer\n",
-                                 __func__));
-               return -ENOMEM;
-       }
-
        nla_for_each_attr(attr, data, len, rem) {
                fnode_param = nla_data(attr);
 
@@ -6590,16 +6581,11 @@ static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess)
        struct dev_db_entry *fw_ddb_entry = NULL;
        dma_addr_t fw_ddb_entry_dma;
        uint16_t *ddb_cookie = NULL;
-       size_t ddb_size;
+       size_t ddb_size = 0;
        void *pddb = NULL;
        int target_id;
        int rc = 0;
 
-       if (!fnode_sess) {
-               rc = -EINVAL;
-               goto exit_ddb_del;
-       }
-
        if (fnode_sess->is_boot_target) {
                rc = -EPERM;
                DEBUG2(ql4_printk(KERN_ERR, ha,
@@ -6631,8 +6617,7 @@ static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess)
 
                dev_db_start_offset += (fnode_sess->target_id *
                                       sizeof(*fw_ddb_entry));
-               dev_db_start_offset += (void *)&(fw_ddb_entry->cookie) -
-                                      (void *)fw_ddb_entry;
+               dev_db_start_offset += offsetof(struct dev_db_entry, cookie);
 
                ddb_size = sizeof(*ddb_cookie);
        }
index 83e0fec35d563262df3d82bbc404d0b61456353a..fe873cf7570d05abe96e71e4422c7744ee15c265 100644 (file)
@@ -5,4 +5,4 @@
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
 
-#define QLA4XXX_DRIVER_VERSION "5.03.00-k8"
+#define QLA4XXX_DRIVER_VERSION "5.03.00-k9"
index 5add6f4e79281a252928519f9613cf9c92f275dc..0a537a0515ca6c6c37829fb3c45f1fe8aa20c4de 100644 (file)
@@ -1997,24 +1997,39 @@ out:
        return ret;
 }
 
-static unsigned int map_state(sector_t lba, unsigned int *num)
+static unsigned long lba_to_map_index(sector_t lba)
+{
+       if (scsi_debug_unmap_alignment) {
+               lba += scsi_debug_unmap_granularity -
+                       scsi_debug_unmap_alignment;
+       }
+       do_div(lba, scsi_debug_unmap_granularity);
+
+       return lba;
+}
+
+static sector_t map_index_to_lba(unsigned long index)
 {
-       unsigned int granularity, alignment, mapped;
-       sector_t block, next, end;
+       return index * scsi_debug_unmap_granularity -
+               scsi_debug_unmap_alignment;
+}
 
-       granularity = scsi_debug_unmap_granularity;
-       alignment = granularity - scsi_debug_unmap_alignment;
-       block = lba + alignment;
-       do_div(block, granularity);
+static unsigned int map_state(sector_t lba, unsigned int *num)
+{
+       sector_t end;
+       unsigned int mapped;
+       unsigned long index;
+       unsigned long next;
 
-       mapped = test_bit(block, map_storep);
+       index = lba_to_map_index(lba);
+       mapped = test_bit(index, map_storep);
 
        if (mapped)
-               next = find_next_zero_bit(map_storep, map_size, block);
+               next = find_next_zero_bit(map_storep, map_size, index);
        else
-               next = find_next_bit(map_storep, map_size, block);
+               next = find_next_bit(map_storep, map_size, index);
 
-       end = next * granularity - scsi_debug_unmap_alignment;
+       end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
        *num = end - lba;
 
        return mapped;
@@ -2022,47 +2037,37 @@ static unsigned int map_state(sector_t lba, unsigned int *num)
 
 static void map_region(sector_t lba, unsigned int len)
 {
-       unsigned int granularity, alignment;
        sector_t end = lba + len;
 
-       granularity = scsi_debug_unmap_granularity;
-       alignment = granularity - scsi_debug_unmap_alignment;
-
        while (lba < end) {
-               sector_t block, rem;
-
-               block = lba + alignment;
-               rem = do_div(block, granularity);
+               unsigned long index = lba_to_map_index(lba);
 
-               if (block < map_size)
-                       set_bit(block, map_storep);
+               if (index < map_size)
+                       set_bit(index, map_storep);
 
-               lba += granularity - rem;
+               lba = map_index_to_lba(index + 1);
        }
 }
 
 static void unmap_region(sector_t lba, unsigned int len)
 {
-       unsigned int granularity, alignment;
        sector_t end = lba + len;
 
-       granularity = scsi_debug_unmap_granularity;
-       alignment = granularity - scsi_debug_unmap_alignment;
-
        while (lba < end) {
-               sector_t block, rem;
-
-               block = lba + alignment;
-               rem = do_div(block, granularity);
+               unsigned long index = lba_to_map_index(lba);
 
-               if (rem == 0 && lba + granularity < end && block < map_size) {
-                       clear_bit(block, map_storep);
-                       if (scsi_debug_lbprz)
+               if (lba == map_index_to_lba(index) &&
+                   lba + scsi_debug_unmap_granularity <= end &&
+                   index < map_size) {
+                       clear_bit(index, map_storep);
+                       if (scsi_debug_lbprz) {
                                memset(fake_storep +
-                                      block * scsi_debug_sector_size, 0,
-                                      scsi_debug_sector_size);
+                                      lba * scsi_debug_sector_size, 0,
+                                      scsi_debug_sector_size *
+                                      scsi_debug_unmap_granularity);
+                       }
                }
-               lba += granularity - rem;
+               lba = map_index_to_lba(index + 1);
        }
 }
 
@@ -2089,7 +2094,7 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
 
        write_lock_irqsave(&atomic_rw, iflags);
        ret = do_device_access(SCpnt, devip, lba, num, 1);
-       if (scsi_debug_unmap_granularity)
+       if (scsi_debug_lbp())
                map_region(lba, num);
        write_unlock_irqrestore(&atomic_rw, iflags);
        if (-1 == ret)
@@ -2122,7 +2127,7 @@ static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
 
        write_lock_irqsave(&atomic_rw, iflags);
 
-       if (unmap && scsi_debug_unmap_granularity) {
+       if (unmap && scsi_debug_lbp()) {
                unmap_region(lba, num);
                goto out;
        }
@@ -2146,7 +2151,7 @@ static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
                       fake_storep + (lba * scsi_debug_sector_size),
                       scsi_debug_sector_size);
 
-       if (scsi_debug_unmap_granularity)
+       if (scsi_debug_lbp())
                map_region(lba, num);
 out:
        write_unlock_irqrestore(&atomic_rw, iflags);
@@ -3389,8 +3394,6 @@ static int __init scsi_debug_init(void)
 
        /* Logical Block Provisioning */
        if (scsi_debug_lbp()) {
-               unsigned int map_bytes;
-
                scsi_debug_unmap_max_blocks =
                        clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
 
@@ -3401,16 +3404,16 @@ static int __init scsi_debug_init(void)
                        clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
 
                if (scsi_debug_unmap_alignment &&
-                   scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) {
+                   scsi_debug_unmap_granularity <=
+                   scsi_debug_unmap_alignment) {
                        printk(KERN_ERR
-                              "%s: ERR: unmap_granularity < unmap_alignment\n",
+                              "%s: ERR: unmap_granularity <= unmap_alignment\n",
                               __func__);
                        return -EINVAL;
                }
 
-               map_size = (sdebug_store_sectors / scsi_debug_unmap_granularity);
-               map_bytes = map_size >> 3;
-               map_storep = vmalloc(map_bytes);
+               map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
+               map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
 
                printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n",
                       map_size);
@@ -3421,7 +3424,7 @@ static int __init scsi_debug_init(void)
                        goto free_vm;
                }
 
-               memset(map_storep, 0x0, map_bytes);
+               bitmap_zero(map_storep, map_size);
 
                /* Map first 1KB for partition table */
                if (scsi_debug_num_parts)
index c1b05a83d403221912fb845e314d3a30656a89fc..f43de1e56420ac7916ca99c83281b81b417f4fb9 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/interrupt.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
+#include <linux/jiffies.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -791,32 +792,48 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
        struct scsi_device *sdev = scmd->device;
        struct Scsi_Host *shost = sdev->host;
        DECLARE_COMPLETION_ONSTACK(done);
-       unsigned long timeleft;
+       unsigned long timeleft = timeout;
        struct scsi_eh_save ses;
+       const unsigned long stall_for = msecs_to_jiffies(100);
        int rtn;
 
+retry:
        scsi_eh_prep_cmnd(scmd, &ses, cmnd, cmnd_size, sense_bytes);
        shost->eh_action = &done;
 
        scsi_log_send(scmd);
        scmd->scsi_done = scsi_eh_done;
-       shost->hostt->queuecommand(shost, scmd);
-
-       timeleft = wait_for_completion_timeout(&done, timeout);
+       rtn = shost->hostt->queuecommand(shost, scmd);
+       if (rtn) {
+               if (timeleft > stall_for) {
+                       scsi_eh_restore_cmnd(scmd, &ses);
+                       timeleft -= stall_for;
+                       msleep(jiffies_to_msecs(stall_for));
+                       goto retry;
+               }
+               /* signal not to enter either branch of the if () below */
+               timeleft = 0;
+               rtn = NEEDS_RETRY;
+       } else {
+               timeleft = wait_for_completion_timeout(&done, timeout);
+       }
 
        shost->eh_action = NULL;
 
-       scsi_log_completion(scmd, SUCCESS);
+       scsi_log_completion(scmd, rtn);
 
        SCSI_LOG_ERROR_RECOVERY(3,
                printk("%s: scmd: %p, timeleft: %ld\n",
                        __func__, scmd, timeleft));
 
        /*
-        * If there is time left scsi_eh_done got called, and we will
-        * examine the actual status codes to see whether the command
-        * actually did complete normally, else tell the host to forget
-        * about this command.
+        * If there is time left, scsi_eh_done got called, and we will examine
+        * the actual status codes to see whether the command actually did
+        * complete normally, else if we have a zero return and no time left,
+        * the command must still be pending, so abort it and return FAILED.
+        * If we never actually managed to issue the command, because
+        * ->queuecommand() kept returning nonzero, use the rtn value set
+        * above (so neither branch of the if below is executed).
         */
        if (timeleft) {
                rtn = scsi_eh_completed_normally(scmd);
@@ -837,7 +854,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
                        rtn = FAILED;
                        break;
                }
-       } else {
+       } else if (!rtn) {
                scsi_abort_eh_cmnd(scmd);
                rtn = FAILED;
        }
index c31187d79343a17af26404a2d6c547dc392ae151..86d522004a208255b17861b4c1cb556bbe7c973e 100644 (file)
@@ -276,11 +276,10 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
 }
 EXPORT_SYMBOL(scsi_execute);
 
-
-int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
+int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
                     int data_direction, void *buffer, unsigned bufflen,
                     struct scsi_sense_hdr *sshdr, int timeout, int retries,
-                    int *resid)
+                    int *resid, int flags)
 {
        char *sense = NULL;
        int result;
@@ -291,14 +290,14 @@ int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
                        return DRIVER_ERROR << 24;
        }
        result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
-                             sense, timeout, retries, 0, resid);
+                             sense, timeout, retries, flags, resid);
        if (sshdr)
                scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
 
        kfree(sense);
        return result;
 }
-EXPORT_SYMBOL(scsi_execute_req);
+EXPORT_SYMBOL(scsi_execute_req_flags);
 
 /*
  * Function:    scsi_init_cmd_errh()
index 8f6b12cbd224801e2828571beb6938808d87b26c..42539ee2cb111f0e10a6b152d9a0d60cbb2d895b 100644 (file)
@@ -144,33 +144,83 @@ static int scsi_bus_restore(struct device *dev)
 
 #ifdef CONFIG_PM_RUNTIME
 
+static int sdev_blk_runtime_suspend(struct scsi_device *sdev,
+                                       int (*cb)(struct device *))
+{
+       int err;
+
+       err = blk_pre_runtime_suspend(sdev->request_queue);
+       if (err)
+               return err;
+       if (cb)
+               err = cb(&sdev->sdev_gendev);
+       blk_post_runtime_suspend(sdev->request_queue, err);
+
+       return err;
+}
+
+static int sdev_runtime_suspend(struct device *dev)
+{
+       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+       int (*cb)(struct device *) = pm ? pm->runtime_suspend : NULL;
+       struct scsi_device *sdev = to_scsi_device(dev);
+       int err;
+
+       if (sdev->request_queue->dev)
+               return sdev_blk_runtime_suspend(sdev, cb);
+
+       err = scsi_dev_type_suspend(dev, cb);
+       if (err == -EAGAIN)
+               pm_schedule_suspend(dev, jiffies_to_msecs(
+                                       round_jiffies_up_relative(HZ/10)));
+       return err;
+}
+
 static int scsi_runtime_suspend(struct device *dev)
 {
        int err = 0;
-       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
        dev_dbg(dev, "scsi_runtime_suspend\n");
-       if (scsi_is_sdev_device(dev)) {
-               err = scsi_dev_type_suspend(dev,
-                               pm ? pm->runtime_suspend : NULL);
-               if (err == -EAGAIN)
-                       pm_schedule_suspend(dev, jiffies_to_msecs(
-                               round_jiffies_up_relative(HZ/10)));
-       }
+       if (scsi_is_sdev_device(dev))
+               err = sdev_runtime_suspend(dev);
 
        /* Insert hooks here for targets, hosts, and transport classes */
 
        return err;
 }
 
-static int scsi_runtime_resume(struct device *dev)
+static int sdev_blk_runtime_resume(struct scsi_device *sdev,
+                                       int (*cb)(struct device *))
 {
        int err = 0;
+
+       blk_pre_runtime_resume(sdev->request_queue);
+       if (cb)
+               err = cb(&sdev->sdev_gendev);
+       blk_post_runtime_resume(sdev->request_queue, err);
+
+       return err;
+}
+
+static int sdev_runtime_resume(struct device *dev)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
        const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+       int (*cb)(struct device *) = pm ? pm->runtime_resume : NULL;
+
+       if (sdev->request_queue->dev)
+               return sdev_blk_runtime_resume(sdev, cb);
+       else
+               return scsi_dev_type_resume(dev, cb);
+}
+
+static int scsi_runtime_resume(struct device *dev)
+{
+       int err = 0;
 
        dev_dbg(dev, "scsi_runtime_resume\n");
        if (scsi_is_sdev_device(dev))
-               err = scsi_dev_type_resume(dev, pm ? pm->runtime_resume : NULL);
+               err = sdev_runtime_resume(dev);
 
        /* Insert hooks here for targets, hosts, and transport classes */
 
@@ -185,10 +235,18 @@ static int scsi_runtime_idle(struct device *dev)
 
        /* Insert hooks here for targets, hosts, and transport classes */
 
-       if (scsi_is_sdev_device(dev))
-               err = pm_schedule_suspend(dev, 100);
-       else
+       if (scsi_is_sdev_device(dev)) {
+               struct scsi_device *sdev = to_scsi_device(dev);
+
+               if (sdev->request_queue->dev) {
+                       pm_runtime_mark_last_busy(dev);
+                       err = pm_runtime_autosuspend(dev);
+               } else {
+                       err = pm_runtime_suspend(dev);
+               }
+       } else {
                err = pm_runtime_suspend(dev);
+       }
        return err;
 }
 
index 47799a33d6caacf7e5715e9c7760d121c24797b1..133926b1bb78bc39231260b5359dff3ff9a16bcb 100644 (file)
@@ -1019,8 +1019,7 @@ exit_match_index:
 /**
  * iscsi_get_flashnode_by_index -finds flashnode session entry by index
  * @shost: pointer to host data
- * @data: pointer to data containing value to use for comparison
- * @fn: function pointer that does actual comparison
+ * @idx: index to match
  *
  * Finds the flashnode session object for the passed index
  *
@@ -1029,13 +1028,13 @@ exit_match_index:
  *  %NULL on failure
  */
 static struct iscsi_bus_flash_session *
-iscsi_get_flashnode_by_index(struct Scsi_Host *shost, void *data,
-                            int (*fn)(struct device *dev, void *data))
+iscsi_get_flashnode_by_index(struct Scsi_Host *shost, uint32_t idx)
 {
        struct iscsi_bus_flash_session *fnode_sess = NULL;
        struct device *dev;
 
-       dev = device_find_child(&shost->shost_gendev, data, fn);
+       dev = device_find_child(&shost->shost_gendev, &idx,
+                               flashnode_match_index);
        if (dev)
                fnode_sess = iscsi_dev_to_flash_session(dev);
 
@@ -1059,18 +1058,13 @@ struct device *
 iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data,
                          int (*fn)(struct device *dev, void *data))
 {
-       struct device *dev;
-
-       dev = device_find_child(&shost->shost_gendev, data, fn);
-       return dev;
+       return device_find_child(&shost->shost_gendev, data, fn);
 }
 EXPORT_SYMBOL_GPL(iscsi_find_flashnode_sess);
 
 /**
  * iscsi_find_flashnode_conn - finds flashnode connection entry
  * @fnode_sess: pointer to parent flashnode session entry
- * @data: pointer to data containing value to use for comparison
- * @fn: function pointer that does actual comparison
  *
  * Finds the flashnode connection object comparing the data passed using logic
  * defined in passed function pointer
@@ -1080,14 +1074,10 @@ EXPORT_SYMBOL_GPL(iscsi_find_flashnode_sess);
  *  %NULL on failure
  */
 struct device *
-iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess,
-                         void *data,
-                         int (*fn)(struct device *dev, void *data))
+iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess)
 {
-       struct device *dev;
-
-       dev = device_find_child(&fnode_sess->dev, data, fn);
-       return dev;
+       return device_find_child(&fnode_sess->dev, NULL,
+                                iscsi_is_flashnode_conn_dev);
 }
 EXPORT_SYMBOL_GPL(iscsi_find_flashnode_conn);
 
@@ -2808,7 +2798,7 @@ static int iscsi_set_flashnode_param(struct iscsi_transport *transport,
        struct iscsi_bus_flash_session *fnode_sess;
        struct iscsi_bus_flash_conn *fnode_conn;
        struct device *dev;
-       uint32_t *idx;
+       uint32_t idx;
        int err = 0;
 
        if (!transport->set_flashnode_param) {
@@ -2824,25 +2814,27 @@ static int iscsi_set_flashnode_param(struct iscsi_transport *transport,
                goto put_host;
        }
 
-       idx = &ev->u.set_flashnode.flashnode_idx;
-       fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
-                                                 flashnode_match_index);
+       idx = ev->u.set_flashnode.flashnode_idx;
+       fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
        if (!fnode_sess) {
                pr_err("%s could not find flashnode %u for host no %u\n",
-                      __func__, *idx, ev->u.set_flashnode.host_no);
+                      __func__, idx, ev->u.set_flashnode.host_no);
                err = -ENODEV;
                goto put_host;
        }
 
-       dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
-                                       iscsi_is_flashnode_conn_dev);
+       dev = iscsi_find_flashnode_conn(fnode_sess);
        if (!dev) {
                err = -ENODEV;
-               goto put_host;
+               goto put_sess;
        }
 
        fnode_conn = iscsi_dev_to_flash_conn(dev);
        err = transport->set_flashnode_param(fnode_sess, fnode_conn, data, len);
+       put_device(dev);
+
+put_sess:
+       put_device(&fnode_sess->dev);
 
 put_host:
        scsi_host_put(shost);
@@ -2891,7 +2883,7 @@ static int iscsi_del_flashnode(struct iscsi_transport *transport,
 {
        struct Scsi_Host *shost;
        struct iscsi_bus_flash_session *fnode_sess;
-       uint32_t *idx;
+       uint32_t idx;
        int err = 0;
 
        if (!transport->del_flashnode) {
@@ -2907,17 +2899,17 @@ static int iscsi_del_flashnode(struct iscsi_transport *transport,
                goto put_host;
        }
 
-       idx = &ev->u.del_flashnode.flashnode_idx;
-       fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
-                                                 flashnode_match_index);
+       idx = ev->u.del_flashnode.flashnode_idx;
+       fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
        if (!fnode_sess) {
                pr_err("%s could not find flashnode %u for host no %u\n",
-                      __func__, *idx, ev->u.del_flashnode.host_no);
+                      __func__, idx, ev->u.del_flashnode.host_no);
                err = -ENODEV;
                goto put_host;
        }
 
        err = transport->del_flashnode(fnode_sess);
+       put_device(&fnode_sess->dev);
 
 put_host:
        scsi_host_put(shost);
@@ -2933,7 +2925,7 @@ static int iscsi_login_flashnode(struct iscsi_transport *transport,
        struct iscsi_bus_flash_session *fnode_sess;
        struct iscsi_bus_flash_conn *fnode_conn;
        struct device *dev;
-       uint32_t *idx;
+       uint32_t idx;
        int err = 0;
 
        if (!transport->login_flashnode) {
@@ -2949,25 +2941,27 @@ static int iscsi_login_flashnode(struct iscsi_transport *transport,
                goto put_host;
        }
 
-       idx = &ev->u.login_flashnode.flashnode_idx;
-       fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
-                                                 flashnode_match_index);
+       idx = ev->u.login_flashnode.flashnode_idx;
+       fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
        if (!fnode_sess) {
                pr_err("%s could not find flashnode %u for host no %u\n",
-                      __func__, *idx, ev->u.login_flashnode.host_no);
+                      __func__, idx, ev->u.login_flashnode.host_no);
                err = -ENODEV;
                goto put_host;
        }
 
-       dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
-                                       iscsi_is_flashnode_conn_dev);
+       dev = iscsi_find_flashnode_conn(fnode_sess);
        if (!dev) {
                err = -ENODEV;
-               goto put_host;
+               goto put_sess;
        }
 
        fnode_conn = iscsi_dev_to_flash_conn(dev);
        err = transport->login_flashnode(fnode_sess, fnode_conn);
+       put_device(dev);
+
+put_sess:
+       put_device(&fnode_sess->dev);
 
 put_host:
        scsi_host_put(shost);
@@ -2983,7 +2977,7 @@ static int iscsi_logout_flashnode(struct iscsi_transport *transport,
        struct iscsi_bus_flash_session *fnode_sess;
        struct iscsi_bus_flash_conn *fnode_conn;
        struct device *dev;
-       uint32_t *idx;
+       uint32_t idx;
        int err = 0;
 
        if (!transport->logout_flashnode) {
@@ -2999,26 +2993,28 @@ static int iscsi_logout_flashnode(struct iscsi_transport *transport,
                goto put_host;
        }
 
-       idx = &ev->u.logout_flashnode.flashnode_idx;
-       fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
-                                                 flashnode_match_index);
+       idx = ev->u.logout_flashnode.flashnode_idx;
+       fnode_sess = iscsi_get_flashnode_by_index(shost, idx);
        if (!fnode_sess) {
                pr_err("%s could not find flashnode %u for host no %u\n",
-                      __func__, *idx, ev->u.logout_flashnode.host_no);
+                      __func__, idx, ev->u.logout_flashnode.host_no);
                err = -ENODEV;
                goto put_host;
        }
 
-       dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
-                                       iscsi_is_flashnode_conn_dev);
+       dev = iscsi_find_flashnode_conn(fnode_sess);
        if (!dev) {
                err = -ENODEV;
-               goto put_host;
+               goto put_sess;
        }
 
        fnode_conn = iscsi_dev_to_flash_conn(dev);
 
        err = transport->logout_flashnode(fnode_sess, fnode_conn);
+       put_device(dev);
+
+put_sess:
+       put_device(&fnode_sess->dev);
 
 put_host:
        scsi_host_put(shost);
@@ -3985,8 +3981,10 @@ static __init int iscsi_transport_init(void)
        }
 
        iscsi_eh_timer_workq = create_singlethread_workqueue("iscsi_eh");
-       if (!iscsi_eh_timer_workq)
+       if (!iscsi_eh_timer_workq) {
+               err = -ENOMEM;
                goto release_nls;
+       }
 
        return 0;
 
index e6689776b4f617ac55b1e9c4dac138cee85a9196..c1c555242d0d715d46c051955deacea4a819d634 100644 (file)
@@ -142,6 +142,7 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
        char *buffer_data;
        struct scsi_mode_data data;
        struct scsi_sense_hdr sshdr;
+       static const char temp[] = "temporary ";
        int len;
 
        if (sdp->type != TYPE_DISK)
@@ -150,6 +151,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
                 * it's not worth the risk */
                return -EINVAL;
 
+       if (strncmp(buf, temp, sizeof(temp) - 1) == 0) {
+               buf += sizeof(temp) - 1;
+               sdkp->cache_override = 1;
+       } else {
+               sdkp->cache_override = 0;
+       }
+
        for (i = 0; i < ARRAY_SIZE(sd_cache_types); i++) {
                len = strlen(sd_cache_types[i]);
                if (strncmp(sd_cache_types[i], buf, len) == 0 &&
@@ -162,6 +170,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
                return -EINVAL;
        rcd = ct & 0x01 ? 1 : 0;
        wce = ct & 0x02 ? 1 : 0;
+
+       if (sdkp->cache_override) {
+               sdkp->WCE = wce;
+               sdkp->RCD = rcd;
+               return count;
+       }
+
        if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT,
                            SD_MAX_RETRIES, &data, NULL))
                return -EINVAL;
@@ -1121,10 +1136,6 @@ static int sd_open(struct block_device *bdev, fmode_t mode)
 
        sdev = sdkp->device;
 
-       retval = scsi_autopm_get_device(sdev);
-       if (retval)
-               goto error_autopm;
-
        /*
         * If the device is in error recovery, wait until it is done.
         * If the device is offline, then disallow any access to it.
@@ -1169,8 +1180,6 @@ static int sd_open(struct block_device *bdev, fmode_t mode)
        return 0;
 
 error_out:
-       scsi_autopm_put_device(sdev);
-error_autopm:
        scsi_disk_put(sdkp);
        return retval;  
 }
@@ -1205,7 +1214,6 @@ static void sd_release(struct gendisk *disk, fmode_t mode)
         * XXX is followed by a "rmmod sd_mod"?
         */
 
-       scsi_autopm_put_device(sdev);
        scsi_disk_put(sdkp);
 }
 
@@ -1366,14 +1374,9 @@ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
        retval = -ENODEV;
 
        if (scsi_block_when_processing_errors(sdp)) {
-               retval = scsi_autopm_get_device(sdp);
-               if (retval)
-                       goto out;
-
                sshdr  = kzalloc(sizeof(*sshdr), GFP_KERNEL);
                retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES,
                                              sshdr);
-               scsi_autopm_put_device(sdp);
        }
 
        /* failed to execute TUR, assume media not present */
@@ -1423,8 +1426,9 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
                 * Leave the rest of the command zero to indicate
                 * flush everything.
                 */
-               res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
-                                      SD_FLUSH_TIMEOUT, SD_MAX_RETRIES, NULL);
+               res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0,
+                                            &sshdr, SD_FLUSH_TIMEOUT,
+                                            SD_MAX_RETRIES, NULL, REQ_PM);
                if (res == 0)
                        break;
        }
@@ -2318,6 +2322,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
        int old_rcd = sdkp->RCD;
        int old_dpofua = sdkp->DPOFUA;
 
+
+       if (sdkp->cache_override)
+               return;
+
        first_len = 4;
        if (sdp->skip_ms_page_8) {
                if (sdp->type == TYPE_RBC)
@@ -2811,6 +2819,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
        sdkp->capacity = 0;
        sdkp->media_present = 1;
        sdkp->write_prot = 0;
+       sdkp->cache_override = 0;
        sdkp->WCE = 0;
        sdkp->RCD = 0;
        sdkp->ATO = 0;
@@ -2837,6 +2846,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
 
        sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
                  sdp->removable ? "removable " : "");
+       blk_pm_runtime_init(sdp->request_queue, dev);
        scsi_autopm_put_device(sdp);
        put_device(&sdkp->dev);
 }
@@ -3020,8 +3030,8 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
        if (!scsi_device_online(sdp))
                return -ENODEV;
 
-       res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
-                              SD_TIMEOUT, SD_MAX_RETRIES, NULL);
+       res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
+                              SD_TIMEOUT, SD_MAX_RETRIES, NULL, REQ_PM);
        if (res) {
                sd_printk(KERN_WARNING, sdkp, "START_STOP FAILED\n");
                sd_print_result(sdkp, res);
index 74a1e4ca5401f9a58e6e705bb2fc8a99fcd04a0f..2386aeb41fe8d74826908d3dca71eb3f97a59ac8 100644 (file)
@@ -73,6 +73,7 @@ struct scsi_disk {
        u8              protection_type;/* Data Integrity Field */
        u8              provisioning_mode;
        unsigned        ATO : 1;        /* state of disk ATO bit */
+       unsigned        cache_override : 1; /* temp override of WCE,RCD */
        unsigned        WCE : 1;        /* state of disk WCE bit */
        unsigned        RCD : 1;        /* state of disk RCD bit, unused */
        unsigned        DPOFUA : 1;     /* state of disk DPOFUA bit */
index 04998f36e5071bdda94ff225f39b0ca3616eeb7b..6174ca4ea27594487d7dc0828d9e21841742b8ed 100644 (file)
@@ -93,14 +93,6 @@ static int sd_dif_type1_verify(struct blk_integrity_exchg *bix, csum_fn *fn)
                if (sdt->app_tag == 0xffff)
                        return 0;
 
-               /* Bad ref tag received from disk */
-               if (sdt->ref_tag == 0xffffffff) {
-                       printk(KERN_ERR
-                              "%s: bad phys ref tag on sector %lu\n",
-                              bix->disk_name, (unsigned long)sector);
-                       return -EIO;
-               }
-
                if (be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
                        printk(KERN_ERR
                               "%s: ref tag error on sector %lu (rcvd %u)\n",
index 0371047c59222251971d823219e3639ac2a53ff6..35faf24c604401eeb560472542cd2ed350289d10 100644 (file)
@@ -57,3 +57,14 @@ config SCSI_UFSHCD_PCI
          If you have a controller with this interface, say Y or M here.
 
          If unsure, say N.
+
+config SCSI_UFSHCD_PLATFORM
+       tristate "Platform bus based UFS Controller support"
+       depends on SCSI_UFSHCD
+       ---help---
+       This selects the UFS host controller support. Select this if
+       you have a UFS controller on the platform bus.
+
+       If you have a controller with this interface, say Y or M here.
+
+       If unsure, say N.
index 9eda0dfbd6df383304de10a0805868ee6485929c..1e5bd48457d636ef0aafd096f8a8038ac0dcbd5e 100644 (file)
@@ -1,3 +1,4 @@
 # UFSHCD makefile
 obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o
 obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
+obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
new file mode 100644 (file)
index 0000000..03319ac
--- /dev/null
@@ -0,0 +1,217 @@
+/*
+ * Universal Flash Storage Host controller Platform bus based glue driver
+ *
+ * This code is based on drivers/scsi/ufs/ufshcd-pltfrm.c
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ *
+ * Authors:
+ *     Santosh Yaraganavi <santosh.sy@samsung.com>
+ *     Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
+ */
+
+#include "ufshcd.h"
+#include <linux/platform_device.h>
+
+#ifdef CONFIG_PM
+/**
+ * ufshcd_pltfrm_suspend - suspend power management function
+ * @dev: pointer to device handle
+ *
+ * Returns 0
+ */
+static int ufshcd_pltfrm_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct ufs_hba *hba =  platform_get_drvdata(pdev);
+
+       /*
+        * TODO:
+        * 1. Call ufshcd_suspend
+        * 2. Do bus specific power management
+        */
+
+       disable_irq(hba->irq);
+
+       return 0;
+}
+
+/**
+ * ufshcd_pltfrm_resume - resume power management function
+ * @dev: pointer to device handle
+ *
+ * Returns 0
+ */
+static int ufshcd_pltfrm_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct ufs_hba *hba =  platform_get_drvdata(pdev);
+
+       /*
+        * TODO:
+        * 1. Call ufshcd_resume.
+        * 2. Do bus specific wake up
+        */
+
+       enable_irq(hba->irq);
+
+       return 0;
+}
+#else
+#define ufshcd_pltfrm_suspend  NULL
+#define ufshcd_pltfrm_resume   NULL
+#endif
+
+/**
+ * ufshcd_pltfrm_probe - probe routine of the driver
+ * @pdev: pointer to Platform device handle
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_pltfrm_probe(struct platform_device *pdev)
+{
+       struct ufs_hba *hba;
+       void __iomem *mmio_base;
+       struct resource *mem_res;
+       struct resource *irq_res;
+       resource_size_t mem_size;
+       int err;
+       struct device *dev = &pdev->dev;
+
+       mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!mem_res) {
+               dev_err(&pdev->dev,
+                       "Memory resource not available\n");
+               err = -ENODEV;
+               goto out_error;
+       }
+
+       mem_size = resource_size(mem_res);
+       if (!request_mem_region(mem_res->start, mem_size, "ufshcd")) {
+               dev_err(&pdev->dev,
+                       "Cannot reserve the memory resource\n");
+               err = -EBUSY;
+               goto out_error;
+       }
+
+       mmio_base = ioremap_nocache(mem_res->start, mem_size);
+       if (!mmio_base) {
+               dev_err(&pdev->dev, "memory map failed\n");
+               err = -ENOMEM;
+               goto out_release_regions;
+       }
+
+       irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       if (!irq_res) {
+               dev_err(&pdev->dev, "IRQ resource not available\n");
+               err = -ENODEV;
+               goto out_iounmap;
+       }
+
+       err = dma_set_coherent_mask(dev, dev->coherent_dma_mask);
+       if (err) {
+               dev_err(&pdev->dev, "set dma mask failed\n");
+               goto out_iounmap;
+       }
+
+       err = ufshcd_init(&pdev->dev, &hba, mmio_base, irq_res->start);
+       if (err) {
+               dev_err(&pdev->dev, "Intialization failed\n");
+               goto out_iounmap;
+       }
+
+       platform_set_drvdata(pdev, hba);
+
+       return 0;
+
+out_iounmap:
+       iounmap(mmio_base);
+out_release_regions:
+       release_mem_region(mem_res->start, mem_size);
+out_error:
+       return err;
+}
+
+/**
+ * ufshcd_pltfrm_remove - remove platform driver routine
+ * @pdev: pointer to platform device handle
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_pltfrm_remove(struct platform_device *pdev)
+{
+       struct resource *mem_res;
+       resource_size_t mem_size;
+       struct ufs_hba *hba =  platform_get_drvdata(pdev);
+
+       disable_irq(hba->irq);
+
+       /* Some buggy controllers raise an interrupt after
+        * the resources are removed. So first we unregister the
+        * irq handler and then free the resources used by the driver.
+        */
+
+       free_irq(hba->irq, hba);
+       ufshcd_remove(hba);
+       mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!mem_res)
+               dev_err(&pdev->dev, "ufshcd: Memory resource not available\n");
+       else {
+               mem_size = resource_size(mem_res);
+               release_mem_region(mem_res->start, mem_size);
+       }
+       platform_set_drvdata(pdev, NULL);
+       return 0;
+}
+
+static const struct of_device_id ufs_of_match[] = {
+       { .compatible = "jedec,ufs-1.1"},
+       { },
+};
+
+static const struct dev_pm_ops ufshcd_dev_pm_ops = {
+       .suspend        = ufshcd_pltfrm_suspend,
+       .resume         = ufshcd_pltfrm_resume,
+};
+
+static struct platform_driver ufshcd_pltfrm_driver = {
+       .probe  = ufshcd_pltfrm_probe,
+       .remove = ufshcd_pltfrm_remove,
+       .driver = {
+               .name   = "ufshcd",
+               .owner  = THIS_MODULE,
+               .pm     = &ufshcd_dev_pm_ops,
+               .of_match_table = ufs_of_match,
+       },
+};
+
+module_platform_driver(ufshcd_pltfrm_driver);
+
+MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
+MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
+MODULE_DESCRIPTION("UFS host controller Pltform bus based glue driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(UFSHCD_DRIVER_VERSION);
index 60fd40c4e4c2ada48a268a997099b18f2fa4ef34..c32a478df81b83c75cdbbdac9206236021f9f863 100644 (file)
@@ -478,7 +478,7 @@ static void ufshcd_compose_upiu(struct ufshcd_lrb *lrbp)
                ucd_cmd_ptr->header.dword_2 = 0;
 
                ucd_cmd_ptr->exp_data_transfer_len =
-                       cpu_to_be32(lrbp->cmd->transfersize);
+                       cpu_to_be32(lrbp->cmd->sdb.length);
 
                memcpy(ucd_cmd_ptr->cdb,
                       lrbp->cmd->cmnd,
index 720665ca2bb1f0332acd3367eab6bdcd88a3428e..e84cf04f441690e0d518e05a4b59976a80860662 100644 (file)
@@ -9,6 +9,19 @@
 
 #include "ssb_private.h"
 
+static struct resource ssb_sflash_resource = {
+       .name   = "ssb_sflash",
+       .start  = SSB_FLASH2,
+       .end    = 0,
+       .flags  = IORESOURCE_MEM | IORESOURCE_READONLY,
+};
+
+struct platform_device ssb_sflash_dev = {
+       .name           = "ssb_sflash",
+       .resource       = &ssb_sflash_resource,
+       .num_resources  = 1,
+};
+
 struct ssb_sflash_tbl_e {
        char *name;
        u32 id;
@@ -16,7 +29,7 @@ struct ssb_sflash_tbl_e {
        u16 numblocks;
 };
 
-static struct ssb_sflash_tbl_e ssb_sflash_st_tbl[] = {
+static const struct ssb_sflash_tbl_e ssb_sflash_st_tbl[] = {
        { "M25P20", 0x11, 0x10000, 4, },
        { "M25P40", 0x12, 0x10000, 8, },
 
@@ -27,7 +40,7 @@ static struct ssb_sflash_tbl_e ssb_sflash_st_tbl[] = {
        { 0 },
 };
 
-static struct ssb_sflash_tbl_e ssb_sflash_sst_tbl[] = {
+static const struct ssb_sflash_tbl_e ssb_sflash_sst_tbl[] = {
        { "SST25WF512", 1, 0x1000, 16, },
        { "SST25VF512", 0x48, 0x1000, 16, },
        { "SST25WF010", 2, 0x1000, 32, },
@@ -45,7 +58,7 @@ static struct ssb_sflash_tbl_e ssb_sflash_sst_tbl[] = {
        { 0 },
 };
 
-static struct ssb_sflash_tbl_e ssb_sflash_at_tbl[] = {
+static const struct ssb_sflash_tbl_e ssb_sflash_at_tbl[] = {
        { "AT45DB011", 0xc, 256, 512, },
        { "AT45DB021", 0x14, 256, 1024, },
        { "AT45DB041", 0x1c, 256, 2048, },
@@ -73,7 +86,8 @@ static void ssb_sflash_cmd(struct ssb_chipcommon *cc, u32 opcode)
 /* Initialize serial flash access */
 int ssb_sflash_init(struct ssb_chipcommon *cc)
 {
-       struct ssb_sflash_tbl_e *e;
+       struct ssb_sflash *sflash = &cc->dev->bus->mipscore.sflash;
+       const struct ssb_sflash_tbl_e *e;
        u32 id, id2;
 
        switch (cc->capabilities & SSB_CHIPCO_CAP_FLASHT) {
@@ -131,9 +145,21 @@ int ssb_sflash_init(struct ssb_chipcommon *cc)
                return -ENOTSUPP;
        }
 
+       sflash->window = SSB_FLASH2;
+       sflash->blocksize = e->blocksize;
+       sflash->numblocks = e->numblocks;
+       sflash->size = sflash->blocksize * sflash->numblocks;
+       sflash->present = true;
+
        pr_info("Found %s serial flash (blocksize: 0x%X, blocks: %d)\n",
                e->name, e->blocksize, e->numblocks);
 
+       /* Prepare the platform device, but don't register it yet. It's too
+        * early: malloc (required by device_private_init) is not available yet. */
+       ssb_sflash_dev.resource[0].end = ssb_sflash_dev.resource[0].start +
+                                        sflash->size;
+       ssb_sflash_dev.dev.platform_data = sflash;
+
        pr_err("Serial flash support is not implemented yet!\n");
 
        return -ENOTSUPP;
index 812775a4bfb6c6a2accfba6549b6d930c5b59fb2..e55ddf7cd7c2c42274145051c16760da800ad5ee 100644 (file)
@@ -553,6 +553,14 @@ static int ssb_devices_register(struct ssb_bus *bus)
        }
 #endif
 
+#ifdef CONFIG_SSB_SFLASH
+       if (bus->mipscore.sflash.present) {
+               err = platform_device_register(&ssb_sflash_dev);
+               if (err)
+                       pr_err("Error registering serial flash\n");
+       }
+#endif
+
        return 0;
 error:
        /* Unwind the already registered devices. */
index 4671f17f09aff44e809766e6317273d480ec6444..eb507a50a564dae01362b5cec6ae2ad25c766100 100644 (file)
@@ -243,6 +243,10 @@ static inline int ssb_sflash_init(struct ssb_chipcommon *cc)
 extern struct platform_device ssb_pflash_dev;
 #endif
 
+#ifdef CONFIG_SSB_SFLASH
+extern struct platform_device ssb_sflash_dev;
+#endif
+
 #ifdef CONFIG_SSB_DRIVER_EXTIF
 extern u32 ssb_extif_watchdog_timer_set_wdt(struct bcm47xx_wdt *wdt, u32 ticks);
 extern u32 ssb_extif_watchdog_timer_set_ms(struct bcm47xx_wdt *wdt, u32 ms);
index ef2e08e9b5901fdd9267a66896da10f829f931ac..5dc9c4bfa66e4686d5360ab33da61eff1109715a 100644 (file)
@@ -14,7 +14,6 @@
  * 2.4/2.5 port                 David McCullough
  */
 
-#include <asm/dbg.h>
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/serial.h>
index 52a3ecd404219c7365850debe309d06b99a2d8c8..6fa2ae77fffde5a2de2fd14f8568da7fc55f8d1e 100644 (file)
@@ -30,7 +30,6 @@
 #include <linux/serial.h>
 #include <linux/serial_core.h>
 
-#include <bcm63xx_clk.h>
 #include <bcm63xx_irq.h>
 #include <bcm63xx_regs.h>
 #include <bcm63xx_io.h>
index 6953dc82850cb278fd99da209e560021d4d0c1e8..a4fdce74f883e4f5868f3efb975ec51347ef2c47 100644 (file)
@@ -60,24 +60,22 @@ static void tty_audit_buf_put(struct tty_audit_buf *buf)
                tty_audit_buf_free(buf);
 }
 
-static void tty_audit_log(const char *description, struct task_struct *tsk,
-                         kuid_t loginuid, unsigned sessionid, int major,
-                         int minor, unsigned char *data, size_t size)
+static void tty_audit_log(const char *description, int major, int minor,
+                         unsigned char *data, size_t size)
 {
        struct audit_buffer *ab;
+       struct task_struct *tsk = current;
+       uid_t uid = from_kuid(&init_user_ns, task_uid(tsk));
+       uid_t loginuid = from_kuid(&init_user_ns, audit_get_loginuid(tsk));
+       u32 sessionid = audit_get_sessionid(tsk);
 
        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_TTY);
        if (ab) {
                char name[sizeof(tsk->comm)];
-               kuid_t uid = task_uid(tsk);
-
-               audit_log_format(ab, "%s pid=%u uid=%u auid=%u ses=%u "
-                                "major=%d minor=%d comm=", description,
-                                tsk->pid,
-                                from_kuid(&init_user_ns, uid),
-                                from_kuid(&init_user_ns, loginuid),
-                                sessionid,
-                                major, minor);
+
+               audit_log_format(ab, "%s pid=%u uid=%u auid=%u ses=%u major=%d"
+                                " minor=%d comm=", description, tsk->pid, uid,
+                                loginuid, sessionid, major, minor);
                get_task_comm(name, tsk);
                audit_log_untrustedstring(ab, name);
                audit_log_format(ab, " data=");
@@ -90,11 +88,9 @@ static void tty_audit_log(const char *description, struct task_struct *tsk,
  *     tty_audit_buf_push      -       Push buffered data out
  *
  *     Generate an audit message from the contents of @buf, which is owned by
- *     @tsk with @loginuid.  @buf->mutex must be locked.
+ *     the current task.  @buf->mutex must be locked.
  */
-static void tty_audit_buf_push(struct task_struct *tsk, kuid_t loginuid,
-                              unsigned int sessionid,
-                              struct tty_audit_buf *buf)
+static void tty_audit_buf_push(struct tty_audit_buf *buf)
 {
        if (buf->valid == 0)
                return;
@@ -102,24 +98,10 @@ static void tty_audit_buf_push(struct task_struct *tsk, kuid_t loginuid,
                buf->valid = 0;
                return;
        }
-       tty_audit_log("tty", tsk, loginuid, sessionid, buf->major, buf->minor,
-                     buf->data, buf->valid);
+       tty_audit_log("tty", buf->major, buf->minor, buf->data, buf->valid);
        buf->valid = 0;
 }
 
-/**
- *     tty_audit_buf_push_current      -       Push buffered data out
- *
- *     Generate an audit message from the contents of @buf, which is owned by
- *     the current task.  @buf->mutex must be locked.
- */
-static void tty_audit_buf_push_current(struct tty_audit_buf *buf)
-{
-       kuid_t auid = audit_get_loginuid(current);
-       unsigned int sessionid = audit_get_sessionid(current);
-       tty_audit_buf_push(current, auid, sessionid, buf);
-}
-
 /**
  *     tty_audit_exit  -       Handle a task exit
  *
@@ -130,15 +112,13 @@ void tty_audit_exit(void)
 {
        struct tty_audit_buf *buf;
 
-       spin_lock_irq(&current->sighand->siglock);
        buf = current->signal->tty_audit_buf;
        current->signal->tty_audit_buf = NULL;
-       spin_unlock_irq(&current->sighand->siglock);
        if (!buf)
                return;
 
        mutex_lock(&buf->mutex);
-       tty_audit_buf_push_current(buf);
+       tty_audit_buf_push(buf);
        mutex_unlock(&buf->mutex);
 
        tty_audit_buf_put(buf);
@@ -151,9 +131,8 @@ void tty_audit_exit(void)
  */
 void tty_audit_fork(struct signal_struct *sig)
 {
-       spin_lock_irq(&current->sighand->siglock);
        sig->audit_tty = current->signal->audit_tty;
-       spin_unlock_irq(&current->sighand->siglock);
+       sig->audit_tty_log_passwd = current->signal->audit_tty_log_passwd;
 }
 
 /**
@@ -163,20 +142,21 @@ void tty_audit_tiocsti(struct tty_struct *tty, char ch)
 {
        struct tty_audit_buf *buf;
        int major, minor, should_audit;
+       unsigned long flags;
 
-       spin_lock_irq(&current->sighand->siglock);
+       spin_lock_irqsave(&current->sighand->siglock, flags);
        should_audit = current->signal->audit_tty;
        buf = current->signal->tty_audit_buf;
        if (buf)
                atomic_inc(&buf->count);
-       spin_unlock_irq(&current->sighand->siglock);
+       spin_unlock_irqrestore(&current->sighand->siglock, flags);
 
        major = tty->driver->major;
        minor = tty->driver->minor_start + tty->index;
        if (buf) {
                mutex_lock(&buf->mutex);
                if (buf->major == major && buf->minor == minor)
-                       tty_audit_buf_push_current(buf);
+                       tty_audit_buf_push(buf);
                mutex_unlock(&buf->mutex);
                tty_audit_buf_put(buf);
        }
@@ -187,24 +167,20 @@ void tty_audit_tiocsti(struct tty_struct *tty, char ch)
 
                auid = audit_get_loginuid(current);
                sessionid = audit_get_sessionid(current);
-               tty_audit_log("ioctl=TIOCSTI", current, auid, sessionid, major,
-                             minor, &ch, 1);
+               tty_audit_log("ioctl=TIOCSTI", major, minor, &ch, 1);
        }
 }
 
 /**
- * tty_audit_push_task -       Flush task's pending audit data
- * @tsk:               task pointer
- * @loginuid:          sender login uid
- * @sessionid:         sender session id
+ * tty_audit_push_current -    Flush current's pending audit data
  *
- * Called with a ref on @tsk held. Try to lock sighand and get a
- * reference to the tty audit buffer if available.
+ * Try to lock sighand and get a reference to the tty audit buffer if available.
  * Flush the buffer or return an appropriate error code.
  */
-int tty_audit_push_task(struct task_struct *tsk, kuid_t loginuid, u32 sessionid)
+int tty_audit_push_current(void)
 {
        struct tty_audit_buf *buf = ERR_PTR(-EPERM);
+       struct task_struct *tsk = current;
        unsigned long flags;
 
        if (!lock_task_sighand(tsk, &flags))
@@ -225,7 +201,7 @@ int tty_audit_push_task(struct task_struct *tsk, kuid_t loginuid, u32 sessionid)
                return PTR_ERR(buf);
 
        mutex_lock(&buf->mutex);
-       tty_audit_buf_push(tsk, loginuid, sessionid, buf);
+       tty_audit_buf_push(buf);
        mutex_unlock(&buf->mutex);
 
        tty_audit_buf_put(buf);
@@ -243,10 +219,11 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty,
                unsigned icanon)
 {
        struct tty_audit_buf *buf, *buf2;
+       unsigned long flags;
 
        buf = NULL;
        buf2 = NULL;
-       spin_lock_irq(&current->sighand->siglock);
+       spin_lock_irqsave(&current->sighand->siglock, flags);
        if (likely(!current->signal->audit_tty))
                goto out;
        buf = current->signal->tty_audit_buf;
@@ -254,7 +231,7 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty,
                atomic_inc(&buf->count);
                goto out;
        }
-       spin_unlock_irq(&current->sighand->siglock);
+       spin_unlock_irqrestore(&current->sighand->siglock, flags);
 
        buf2 = tty_audit_buf_alloc(tty->driver->major,
                                   tty->driver->minor_start + tty->index,
@@ -264,7 +241,7 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty,
                return NULL;
        }
 
-       spin_lock_irq(&current->sighand->siglock);
+       spin_lock_irqsave(&current->sighand->siglock, flags);
        if (!current->signal->audit_tty)
                goto out;
        buf = current->signal->tty_audit_buf;
@@ -276,7 +253,7 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty,
        atomic_inc(&buf->count);
        /* Fall through */
  out:
-       spin_unlock_irq(&current->sighand->siglock);
+       spin_unlock_irqrestore(&current->sighand->siglock, flags);
        if (buf2)
                tty_audit_buf_free(buf2);
        return buf;
@@ -292,10 +269,18 @@ void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
 {
        struct tty_audit_buf *buf;
        int major, minor;
+       int audit_log_tty_passwd;
+       unsigned long flags;
 
        if (unlikely(size == 0))
                return;
 
+       spin_lock_irqsave(&current->sighand->siglock, flags);
+       audit_log_tty_passwd = current->signal->audit_tty_log_passwd;
+       spin_unlock_irqrestore(&current->sighand->siglock, flags);
+       if (!audit_log_tty_passwd && icanon && !L_ECHO(tty))
+               return;
+
        if (tty->driver->type == TTY_DRIVER_TYPE_PTY
            && tty->driver->subtype == PTY_TYPE_MASTER)
                return;
@@ -309,7 +294,7 @@ void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
        minor = tty->driver->minor_start + tty->index;
        if (buf->major != major || buf->minor != minor
            || buf->icanon != icanon) {
-               tty_audit_buf_push_current(buf);
+               tty_audit_buf_push(buf);
                buf->major = major;
                buf->minor = minor;
                buf->icanon = icanon;
@@ -325,7 +310,7 @@ void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
                data += run;
                size -= run;
                if (buf->valid == N_TTY_BUF_SIZE)
-                       tty_audit_buf_push_current(buf);
+                       tty_audit_buf_push(buf);
        } while (size != 0);
        mutex_unlock(&buf->mutex);
        tty_audit_buf_put(buf);
@@ -339,16 +324,17 @@ void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
 void tty_audit_push(struct tty_struct *tty)
 {
        struct tty_audit_buf *buf;
+       unsigned long flags;
 
-       spin_lock_irq(&current->sighand->siglock);
+       spin_lock_irqsave(&current->sighand->siglock, flags);
        if (likely(!current->signal->audit_tty)) {
-               spin_unlock_irq(&current->sighand->siglock);
+               spin_unlock_irqrestore(&current->sighand->siglock, flags);
                return;
        }
        buf = current->signal->tty_audit_buf;
        if (buf)
                atomic_inc(&buf->count);
-       spin_unlock_irq(&current->sighand->siglock);
+       spin_unlock_irqrestore(&current->sighand->siglock, flags);
 
        if (buf) {
                int major, minor;
@@ -357,7 +343,7 @@ void tty_audit_push(struct tty_struct *tty)
                minor = tty->driver->minor_start + tty->index;
                mutex_lock(&buf->mutex);
                if (buf->major == major && buf->minor == minor)
-                       tty_audit_buf_push_current(buf);
+                       tty_audit_buf_push(buf);
                mutex_unlock(&buf->mutex);
                tty_audit_buf_put(buf);
        }
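The hunks above replace spin_lock_irq()/spin_unlock_irq() on current->sighand->siglock with the irqsave variants, so a caller that already runs with interrupts disabled keeps its interrupt state instead of having it unconditionally re-enabled on unlock. A minimal sketch of the pattern, using a hypothetical lock user that is not part of the patch:

#include <linux/spinlock.h>

/*
 * Hypothetical example of the irqsave pattern the hunks above switch to:
 * save the caller's interrupt state, take the lock with interrupts off,
 * and restore the saved state on unlock.
 */
static unsigned int read_flag_locked(spinlock_t *lock, const unsigned int *flag)
{
        unsigned long flags;
        unsigned int val;

        spin_lock_irqsave(lock, flags);         /* disable IRQs, remember prior state */
        val = *flag;
        spin_unlock_irqrestore(lock, flags);    /* restore the remembered state */

        return val;
}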
index ddabaa867b0dc38e24ab090d6e62108c88a02a2c..700cac067b4611891af50d90ee083bdbb0f94eb9 100644 (file)
@@ -111,30 +111,16 @@ static int au1100fb_fb_blank(int blank_mode, struct fb_info *fbi)
        switch (blank_mode) {
 
        case VESA_NO_BLANKING:
-                       /* Turn on panel */
-                       fbdev->regs->lcd_control |= LCD_CONTROL_GO;
-#ifdef CONFIG_MIPS_PB1100
-                       if (fbdev->panel_idx == 1) {
-                               au_writew(au_readw(PB1100_G_CONTROL)
-                                         | (PB1100_G_CONTROL_BL | PB1100_G_CONTROL_VDD),
-                       PB1100_G_CONTROL);
-                       }
-#endif
+               /* Turn on panel */
+               fbdev->regs->lcd_control |= LCD_CONTROL_GO;
                au_sync();
                break;
 
        case VESA_VSYNC_SUSPEND:
        case VESA_HSYNC_SUSPEND:
        case VESA_POWERDOWN:
-                       /* Turn off panel */
-                       fbdev->regs->lcd_control &= ~LCD_CONTROL_GO;
-#ifdef CONFIG_MIPS_PB1100
-                       if (fbdev->panel_idx == 1) {
-                               au_writew(au_readw(PB1100_G_CONTROL)
-                                         & ~(PB1100_G_CONTROL_BL | PB1100_G_CONTROL_VDD),
-                       PB1100_G_CONTROL);
-                       }
-#endif
+               /* Turn off panel */
+               fbdev->regs->lcd_control &= ~LCD_CONTROL_GO;
                au_sync();
                break;
        default:
index dd4d9cb862432b4ce6715aba3164a53ffaaa87a5..f03bf501527f64785a3fbbec39963f979d70ad8a 100644 (file)
@@ -141,7 +141,7 @@ config XEN_GRANT_DEV_ALLOC
 
 config SWIOTLB_XEN
        def_bool y
-       depends on PCI
+       depends on PCI && X86
        select SWIOTLB
 
 config XEN_TMEM
index d8cc8127f19c18f9b6a4ea9f937015363b912bd8..6a6bbe4ede92c67afe4c88efd105839f14b3a240 100644 (file)
@@ -167,6 +167,8 @@ static void xen_irq_info_common_init(struct irq_info *info,
        info->cpu = cpu;
 
        evtchn_to_irq[evtchn] = irq;
+
+       irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);
 }
 
 static void xen_irq_info_evtchn_init(unsigned irq,
@@ -874,7 +876,6 @@ int bind_evtchn_to_irq(unsigned int evtchn)
                struct irq_info *info = info_for_irq(irq);
                WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
        }
-       irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);
 
 out:
        mutex_unlock(&irq_mapping_update_lock);
index d5c25db4398f0244f0f1d0c1f1e20a55f8f51aa9..f71ec125290db7da87355f444f7308826ee1c034 100644 (file)
@@ -243,7 +243,7 @@ void ecryptfs_destroy_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat)
        struct ecryptfs_key_sig *key_sig, *key_sig_tmp;
 
        if (crypt_stat->tfm)
-               crypto_free_blkcipher(crypt_stat->tfm);
+               crypto_free_ablkcipher(crypt_stat->tfm);
        if (crypt_stat->hash_tfm)
                crypto_free_hash(crypt_stat->hash_tfm);
        list_for_each_entry_safe(key_sig, key_sig_tmp,
@@ -319,6 +319,22 @@ int virt_to_scatterlist(const void *addr, int size, struct scatterlist *sg,
        return i;
 }
 
+struct extent_crypt_result {
+       struct completion completion;
+       int rc;
+};
+
+static void extent_crypt_complete(struct crypto_async_request *req, int rc)
+{
+       struct extent_crypt_result *ecr = req->data;
+
+       if (rc == -EINPROGRESS)
+               return;
+
+       ecr->rc = rc;
+       complete(&ecr->completion);
+}
+
 /**
  * encrypt_scatterlist
  * @crypt_stat: Pointer to the crypt_stat struct to initialize.
@@ -334,11 +350,8 @@ static int encrypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
                               struct scatterlist *src_sg, int size,
                               unsigned char *iv)
 {
-       struct blkcipher_desc desc = {
-               .tfm = crypt_stat->tfm,
-               .info = iv,
-               .flags = CRYPTO_TFM_REQ_MAY_SLEEP
-       };
+       struct ablkcipher_request *req = NULL;
+       struct extent_crypt_result ecr;
        int rc = 0;
 
        BUG_ON(!crypt_stat || !crypt_stat->tfm
@@ -349,24 +362,47 @@ static int encrypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
                ecryptfs_dump_hex(crypt_stat->key,
                                  crypt_stat->key_size);
        }
-       /* Consider doing this once, when the file is opened */
+
+       init_completion(&ecr.completion);
+
        mutex_lock(&crypt_stat->cs_tfm_mutex);
-       if (!(crypt_stat->flags & ECRYPTFS_KEY_SET)) {
-               rc = crypto_blkcipher_setkey(crypt_stat->tfm, crypt_stat->key,
-                                            crypt_stat->key_size);
-               crypt_stat->flags |= ECRYPTFS_KEY_SET;
-       }
-       if (rc) {
-               ecryptfs_printk(KERN_ERR, "Error setting key; rc = [%d]\n",
-                               rc);
+       req = ablkcipher_request_alloc(crypt_stat->tfm, GFP_NOFS);
+       if (!req) {
                mutex_unlock(&crypt_stat->cs_tfm_mutex);
-               rc = -EINVAL;
+               rc = -ENOMEM;
                goto out;
        }
-       ecryptfs_printk(KERN_DEBUG, "Encrypting [%d] bytes.\n", size);
-       crypto_blkcipher_encrypt_iv(&desc, dest_sg, src_sg, size);
+
+       ablkcipher_request_set_callback(req,
+                       CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+                       extent_crypt_complete, &ecr);
+       /* Consider doing this once, when the file is opened */
+       if (!(crypt_stat->flags & ECRYPTFS_KEY_SET)) {
+               rc = crypto_ablkcipher_setkey(crypt_stat->tfm, crypt_stat->key,
+                                             crypt_stat->key_size);
+               if (rc) {
+                       ecryptfs_printk(KERN_ERR,
+                                       "Error setting key; rc = [%d]\n",
+                                       rc);
+                       mutex_unlock(&crypt_stat->cs_tfm_mutex);
+                       rc = -EINVAL;
+                       goto out;
+               }
+               crypt_stat->flags |= ECRYPTFS_KEY_SET;
+       }
        mutex_unlock(&crypt_stat->cs_tfm_mutex);
+       ecryptfs_printk(KERN_DEBUG, "Encrypting [%d] bytes.\n", size);
+       ablkcipher_request_set_crypt(req, src_sg, dest_sg, size, iv);
+       rc = crypto_ablkcipher_encrypt(req);
+       if (rc == -EINPROGRESS || rc == -EBUSY) {
+               struct extent_crypt_result *ecr = req->base.data;
+
+               wait_for_completion(&ecr->completion);
+               rc = ecr->rc;
+               INIT_COMPLETION(ecr->completion);
+       }
 out:
+       ablkcipher_request_free(req);
        return rc;
 }
 
@@ -624,35 +660,61 @@ static int decrypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
                               struct scatterlist *src_sg, int size,
                               unsigned char *iv)
 {
-       struct blkcipher_desc desc = {
-               .tfm = crypt_stat->tfm,
-               .info = iv,
-               .flags = CRYPTO_TFM_REQ_MAY_SLEEP
-       };
+       struct ablkcipher_request *req = NULL;
+       struct extent_crypt_result ecr;
        int rc = 0;
 
-       /* Consider doing this once, when the file is opened */
+       BUG_ON(!crypt_stat || !crypt_stat->tfm
+              || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED));
+       if (unlikely(ecryptfs_verbosity > 0)) {
+               ecryptfs_printk(KERN_DEBUG, "Key size [%zd]; key:\n",
+                               crypt_stat->key_size);
+               ecryptfs_dump_hex(crypt_stat->key,
+                                 crypt_stat->key_size);
+       }
+
+       init_completion(&ecr.completion);
+
        mutex_lock(&crypt_stat->cs_tfm_mutex);
-       rc = crypto_blkcipher_setkey(crypt_stat->tfm, crypt_stat->key,
-                                    crypt_stat->key_size);
-       if (rc) {
-               ecryptfs_printk(KERN_ERR, "Error setting key; rc = [%d]\n",
-                               rc);
+       req = ablkcipher_request_alloc(crypt_stat->tfm, GFP_NOFS);
+       if (!req) {
                mutex_unlock(&crypt_stat->cs_tfm_mutex);
-               rc = -EINVAL;
+               rc = -ENOMEM;
                goto out;
        }
-       ecryptfs_printk(KERN_DEBUG, "Decrypting [%d] bytes.\n", size);
-       rc = crypto_blkcipher_decrypt_iv(&desc, dest_sg, src_sg, size);
+
+       ablkcipher_request_set_callback(req,
+                       CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+                       extent_crypt_complete, &ecr);
+       /* Consider doing this once, when the file is opened */
+       if (!(crypt_stat->flags & ECRYPTFS_KEY_SET)) {
+               rc = crypto_ablkcipher_setkey(crypt_stat->tfm, crypt_stat->key,
+                                             crypt_stat->key_size);
+               if (rc) {
+                       ecryptfs_printk(KERN_ERR,
+                                       "Error setting key; rc = [%d]\n",
+                                       rc);
+                       mutex_unlock(&crypt_stat->cs_tfm_mutex);
+                       rc = -EINVAL;
+                       goto out;
+               }
+               crypt_stat->flags |= ECRYPTFS_KEY_SET;
+       }
        mutex_unlock(&crypt_stat->cs_tfm_mutex);
-       if (rc) {
-               ecryptfs_printk(KERN_ERR, "Error decrypting; rc = [%d]\n",
-                               rc);
-               goto out;
+       ecryptfs_printk(KERN_DEBUG, "Decrypting [%d] bytes.\n", size);
+       ablkcipher_request_set_crypt(req, src_sg, dest_sg, size, iv);
+       rc = crypto_ablkcipher_decrypt(req);
+       if (rc == -EINPROGRESS || rc == -EBUSY) {
+               struct extent_crypt_result *ecr = req->base.data;
+
+               wait_for_completion(&ecr->completion);
+               rc = ecr->rc;
+               INIT_COMPLETION(ecr->completion);
        }
-       rc = size;
 out:
+       ablkcipher_request_free(req);
        return rc;
+
 }
 
 /**
@@ -746,8 +808,7 @@ int ecryptfs_init_crypt_ctx(struct ecryptfs_crypt_stat *crypt_stat)
                                                    crypt_stat->cipher, "cbc");
        if (rc)
                goto out_unlock;
-       crypt_stat->tfm = crypto_alloc_blkcipher(full_alg_name, 0,
-                                                CRYPTO_ALG_ASYNC);
+       crypt_stat->tfm = crypto_alloc_ablkcipher(full_alg_name, 0, 0);
        kfree(full_alg_name);
        if (IS_ERR(crypt_stat->tfm)) {
                rc = PTR_ERR(crypt_stat->tfm);
@@ -757,7 +818,7 @@ int ecryptfs_init_crypt_ctx(struct ecryptfs_crypt_stat *crypt_stat)
                                crypt_stat->cipher);
                goto out_unlock;
        }
-       crypto_blkcipher_set_flags(crypt_stat->tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+       crypto_ablkcipher_set_flags(crypt_stat->tfm, CRYPTO_TFM_REQ_WEAK_KEY);
        rc = 0;
 out_unlock:
        mutex_unlock(&crypt_stat->cs_tfm_mutex);
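The conversion above moves eCryptfs from the synchronous blkcipher API to ablkcipher: crypto_ablkcipher_encrypt()/_decrypt() may return -EINPROGRESS or -EBUSY, in which case the caller sleeps on a completion that extent_crypt_complete() signals with the final result. A hedged sketch of that wait pattern with hypothetical names, assuming the same callback convention as the patch:

#include <linux/completion.h>
#include <linux/crypto.h>
#include <linux/errno.h>

/* Per-request state: a completion plus the result reported by the callback. */
struct crypt_wait {
        struct completion done;
        int rc;
};

/* Completion callback, wired up via ablkcipher_request_set_callback(). */
static void crypt_wait_complete(struct crypto_async_request *req, int rc)
{
        struct crypt_wait *w = req->data;

        if (rc == -EINPROGRESS)         /* backlog notification only, keep waiting */
                return;
        w->rc = rc;
        complete(&w->done);
}

/* Turn an asynchronous return code into a synchronous one. */
static int crypt_wait_on(struct crypt_wait *w, int rc)
{
        if (rc == -EINPROGRESS || rc == -EBUSY) {
                wait_for_completion(&w->done);
                rc = w->rc;
        }
        return rc;
}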
index dd299b389d4e4fc36b7694dbf6acac0dcf9742f7..f622a733f7adc3ff1778e4f74790db507c61c824 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/nsproxy.h>
 #include <linux/backing-dev.h>
 #include <linux/ecryptfs.h>
+#include <linux/crypto.h>
 
 #define ECRYPTFS_DEFAULT_IV_BYTES 16
 #define ECRYPTFS_DEFAULT_EXTENT_SIZE 4096
@@ -233,7 +234,7 @@ struct ecryptfs_crypt_stat {
        size_t extent_shift;
        unsigned int extent_mask;
        struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
-       struct crypto_blkcipher *tfm;
+       struct crypto_ablkcipher *tfm;
        struct crypto_hash *hash_tfm; /* Crypto context for generating
                                       * the initialization vectors */
        unsigned char cipher[ECRYPTFS_MAX_CIPHER_NAME_SIZE];
index 57ae9c8c66bfc6d98ae96643874710cdc3de321a..85e40d1c0a8fd64b358447ee09b26f906102ee1d 100644 (file)
@@ -2740,7 +2740,7 @@ static int do_last(struct nameidata *nd, struct path *path,
                if (error)
                        return error;
 
-               audit_inode(name, dir, 0);
+               audit_inode(name, dir, LOOKUP_PARENT);
                error = -EISDIR;
                /* trailing slashes? */
                if (nd->last.name[nd->last.len])
index 8ae5abfe6ba24fd699aed2e156a2b2f815695c04..27d74a2945151cfbdf76e41cd381b90a0efe4d09 100644 (file)
@@ -279,6 +279,7 @@ do_open_fhandle(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, str
 {
        struct svc_fh *current_fh = &cstate->current_fh;
        __be32 status;
+       int accmode = 0;
 
        /* We don't know the target directory, and therefore can not
        * set the change info
@@ -290,9 +291,19 @@ do_open_fhandle(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, str
 
        open->op_truncate = (open->op_iattr.ia_valid & ATTR_SIZE) &&
                (open->op_iattr.ia_size == 0);
+       /*
+        * In the delegation case, the client is telling us about an
+        * open that it *already* performed locally, some time ago.  We
+        * should let it succeed now if possible.
+        *
+        * In the case of a CLAIM_FH open, on the other hand, the client
+        * may be counting on us to enforce permissions (the Linux 4.1
+        * client uses this for normal opens, for example).
+        */
+       if (open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH)
+               accmode = NFSD_MAY_OWNER_OVERRIDE;
 
-       status = do_open_permission(rqstp, current_fh, open,
-                                   NFSD_MAY_OWNER_OVERRIDE);
+       status = do_open_permission(rqstp, current_fh, open, accmode);
 
        return status;
 }
index 899ca26dd194d73f43234ba08447f1533428eff6..4e9a21db867ae60afcc14a6ca362e978495c4a5c 100644 (file)
@@ -146,7 +146,7 @@ out_no_tfm:
  * then disable recovery tracking.
  */
 static void
-legacy_recdir_name_error(int error)
+legacy_recdir_name_error(struct nfs4_client *clp, int error)
 {
        printk(KERN_ERR "NFSD: unable to generate recoverydir "
                        "name (%d).\n", error);
@@ -159,9 +159,7 @@ legacy_recdir_name_error(int error)
        if (error == -ENOENT) {
                printk(KERN_ERR "NFSD: disabling legacy clientid tracking. "
                        "Reboot recovery will not function correctly!\n");
-
-               /* the argument is ignored by the legacy exit function */
-               nfsd4_client_tracking_exit(NULL);
+               nfsd4_client_tracking_exit(clp->net);
        }
 }
 
@@ -184,7 +182,7 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
 
        status = nfs4_make_rec_clidname(dname, &clp->cl_name);
        if (status)
-               return legacy_recdir_name_error(status);
+               return legacy_recdir_name_error(clp, status);
 
        status = nfs4_save_creds(&original_cred);
        if (status < 0)
@@ -341,7 +339,7 @@ nfsd4_remove_clid_dir(struct nfs4_client *clp)
 
        status = nfs4_make_rec_clidname(dname, &clp->cl_name);
        if (status)
-               return legacy_recdir_name_error(status);
+               return legacy_recdir_name_error(clp, status);
 
        status = mnt_want_write_file(nn->rec_file);
        if (status)
@@ -601,7 +599,7 @@ nfsd4_check_legacy_client(struct nfs4_client *clp)
 
        status = nfs4_make_rec_clidname(dname, &clp->cl_name);
        if (status) {
-               legacy_recdir_name_error(status);
+               legacy_recdir_name_error(clp, status);
                return status;
        }
 
index d0be29fa94cffba4e11355b20a270a11e608adef..6c80083a984fc192ebc73bad2a2edc86b4c25e11 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/uaccess.h>
+#include <linux/compat.h>
 
 #include <asm/ioctls.h>
 
@@ -857,6 +858,22 @@ fput_and_out:
        return ret;
 }
 
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE6(fanotify_mark,
+                               int, fanotify_fd, unsigned int, flags,
+                               __u32, mask0, __u32, mask1, int, dfd,
+                               const char  __user *, pathname)
+{
+       return sys_fanotify_mark(fanotify_fd, flags,
+#ifdef __BIG_ENDIAN
+                               ((__u64)mask1 << 32) | mask0,
+#else
+                               ((__u64)mask0 << 32) | mask1,
+#endif
+                                dfd, pathname);
+}
+#endif
+
 /*
  * fanotify_user_setup - Our initialization function.  Note that we cannot return
  * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
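The compat entry point above receives the 64-bit mask as two 32-bit halves; which half arrives first follows the 32-bit ABI's endianness, which is exactly what the #ifdef __BIG_ENDIAN distinction selects between. Independent of that ordering, the reassembly is the usual shift-and-or; a hypothetical illustration (not part of the patch):

#include <linux/types.h>

/* Rebuild a 64-bit value from its high and low 32-bit halves. */
static inline u64 join_mask(u32 hi, u32 lo)
{
        return ((u64)hi << 32) | lo;
}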
index e1a7779dd3cb18e72276569f8d2b0ce77e53a42c..f373bde8f545da481ba0a7caa873271dd599b30d 100644 (file)
@@ -49,8 +49,11 @@ static unsigned long romfs_get_unmapped_area(struct file *file,
                return (unsigned long) -EINVAL;
 
        offset += ROMFS_I(inode)->i_dataoffset;
-       if (offset > mtd->size - len)
+       if (offset >= mtd->size)
                return (unsigned long) -EINVAL;
+       /* the mapping mustn't extend beyond the EOF */
+       if ((offset + len) > mtd->size)
+               len = mtd->size - offset;
 
        ret = mtd_get_unmapped_area(mtd, len, offset, flags);
        if (ret == -EOPNOTSUPP)
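The romfs change above first rejects offsets at or beyond the end of the MTD device and then trims the mapping length to end at EOF, instead of comparing against mtd->size - len, which misbehaves when len exceeds the device size. The same check-then-clamp in isolation, with hypothetical names:

#include <linux/errno.h>
#include <linux/types.h>

/*
 * Sketch of the clamp performed above: validate the offset before doing
 * any subtraction, then shorten the length so offset + len never extends
 * past the device.
 */
static int clamp_mapping(u64 device_size, u64 offset, unsigned long *len)
{
        if (offset >= device_size)
                return -EINVAL;
        if (offset + *len > device_size)
                *len = device_size - offset;
        return 0;
}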
index 5a6d718adf34825eb11bc4dd20cbcb8f1dced0ad..b20b03852f21dd722e405c4685f2c9851588ed98 100644 (file)
@@ -84,8 +84,13 @@ extern int audit_classify_arch(int arch);
 #define        AUDIT_TYPE_CHILD_DELETE 3       /* a child being deleted */
 #define        AUDIT_TYPE_CHILD_CREATE 4       /* a child being created */
 
+/* maximum number of arguments that audit_socketcall can process */
+#define AUDITSC_ARGS           6
+
 struct filename;
 
+extern void audit_log_session_info(struct audit_buffer *ab);
+
 #ifdef CONFIG_AUDITSYSCALL
 /* These are defined in auditsc.c */
                                /* Public API */
@@ -120,7 +125,7 @@ static inline void audit_syscall_entry(int arch, int major, unsigned long a0,
                                       unsigned long a1, unsigned long a2,
                                       unsigned long a3)
 {
-       if (unlikely(!audit_dummy_context()))
+       if (unlikely(current->audit_context))
                __audit_syscall_entry(arch, major, a0, a1, a2, a3);
 }
 static inline void audit_syscall_exit(void *pt_regs)
@@ -185,12 +190,10 @@ static inline int audit_get_sessionid(struct task_struct *tsk)
        return tsk->sessionid;
 }
 
-extern void audit_log_task_context(struct audit_buffer *ab);
-extern void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk);
 extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp);
 extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode);
 extern int __audit_bprm(struct linux_binprm *bprm);
-extern void __audit_socketcall(int nargs, unsigned long *args);
+extern int __audit_socketcall(int nargs, unsigned long *args);
 extern int __audit_sockaddr(int len, void *addr);
 extern void __audit_fd_pair(int fd1, int fd2);
 extern void __audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr);
@@ -224,10 +227,11 @@ static inline int audit_bprm(struct linux_binprm *bprm)
                return __audit_bprm(bprm);
        return 0;
 }
-static inline void audit_socketcall(int nargs, unsigned long *args)
+static inline int audit_socketcall(int nargs, unsigned long *args)
 {
        if (unlikely(!audit_dummy_context()))
-               __audit_socketcall(nargs, args);
+               return __audit_socketcall(nargs, args);
+       return 0;
 }
 static inline int audit_sockaddr(int len, void *addr)
 {
@@ -340,11 +344,6 @@ static inline int audit_get_sessionid(struct task_struct *tsk)
 {
        return -1;
 }
-static inline void audit_log_task_context(struct audit_buffer *ab)
-{ }
-static inline void audit_log_task_info(struct audit_buffer *ab,
-                                      struct task_struct *tsk)
-{ }
 static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp)
 { }
 static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid,
@@ -354,8 +353,10 @@ static inline int audit_bprm(struct linux_binprm *bprm)
 {
        return 0;
 }
-static inline void audit_socketcall(int nargs, unsigned long *args)
-{ }
+static inline int audit_socketcall(int nargs, unsigned long *args)
+{
+       return 0;
+}
 static inline void audit_fd_pair(int fd1, int fd2)
 { }
 static inline int audit_sockaddr(int len, void *addr)
@@ -390,6 +391,11 @@ static inline void audit_ptrace(struct task_struct *t)
 #define audit_signals 0
 #endif /* CONFIG_AUDITSYSCALL */
 
+static inline bool audit_loginuid_set(struct task_struct *tsk)
+{
+       return uid_valid(audit_get_loginuid(tsk));
+}
+
 #ifdef CONFIG_AUDIT
 /* These are defined in audit.c */
                                /* Public API */
@@ -429,14 +435,17 @@ static inline void            audit_log_secctx(struct audit_buffer *ab, u32 secid)
 { }
 #endif
 
+extern int audit_log_task_context(struct audit_buffer *ab);
+extern void audit_log_task_info(struct audit_buffer *ab,
+                               struct task_struct *tsk);
+
 extern int                 audit_update_lsm_rules(void);
 
                                /* Private API (for audit.c only) */
-extern int audit_filter_user(void);
+extern int audit_filter_user(int type);
 extern int audit_filter_type(int type);
 extern int  audit_receive_filter(int type, int pid, int seq,
-                               void *data, size_t datasz, kuid_t loginuid,
-                               u32 sessionid, u32 sid);
+                               void *data, size_t datasz);
 extern int audit_enabled;
 #else /* CONFIG_AUDIT */
 static inline __printf(4, 5)
@@ -476,6 +485,13 @@ static inline void audit_log_link_denied(const char *string,
 { }
 static inline void audit_log_secctx(struct audit_buffer *ab, u32 secid)
 { }
+static inline int audit_log_task_context(struct audit_buffer *ab)
+{
+       return 0;
+}
+static inline void audit_log_task_info(struct audit_buffer *ab,
+                                      struct task_struct *tsk)
+{ }
 #define audit_enabled 0
 #endif /* CONFIG_AUDIT */
 static inline void audit_log_string(struct audit_buffer *ab, const char *buf)
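audit_socketcall() now returns int because recording the arguments can fail (typically with -ENOMEM), and a caller is expected to fail the syscall rather than proceed unaudited. A hedged sketch of such a caller, using a hypothetical dispatcher rather than the real net/socket.c code:

#include <linux/audit.h>

static long dispatch_socketcall(int nargs, unsigned long *args)
{
        int err = audit_socketcall(nargs, args);

        if (err)                /* audit record could not be set up */
                return err;

        /* ... perform the actual socket operation here ... */
        return 0;
}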
index d53c35352ea9d14aa4a9c974fa559ab95f88e187..7f0c1dd0907904a831ab0f65fdcbb0d67a068b50 100644 (file)
@@ -673,6 +673,8 @@ int __compat_save_altstack(compat_stack_t __user *, unsigned long);
 asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
                                                 struct compat_timespec __user *interval);
 
+asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32,
+                                           int, const char __user *);
 #else
 
 #define is_compat_task() (0)
index 3c86faa597980fbad7e17d65d7668d1da5d40e60..8f0406230a0a4890b4247e60215795c11e0a30d3 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/completion.h>
 #include <linux/hrtimer.h>
 
-#define CPUIDLE_STATE_MAX      8
+#define CPUIDLE_STATE_MAX      10
 #define CPUIDLE_NAME_LEN       16
 #define CPUIDLE_DESC_LEN       32
 
index 1e483fa7afb41b681ba802061d4ae5ab4ad4676a..3cd32478f2fd095817c7fb0b9f6558d336020bde 100644 (file)
@@ -79,11 +79,26 @@ typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
 typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
                            struct bio_vec *biovec, int max_size);
 
+/*
+ * These iteration functions are typically used to check (and combine)
+ * properties of underlying devices.
+ * E.g. Does at least one underlying device support flush?
+ *      Does any underlying device not support WRITE_SAME?
+ *
+ * The callout function is called once for each contiguous section of
+ * an underlying device.  State can be maintained in *data.
+ * Return non-zero to stop iterating through any further devices.
+ */
 typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
                                           struct dm_dev *dev,
                                           sector_t start, sector_t len,
                                           void *data);
 
+/*
+ * This function must iterate through each section of device used by the
+ * target until it encounters a non-zero return code, which it then returns.
+ * Returns zero if no callout returned non-zero.
+ */
 typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
                                      iterate_devices_callout_fn fn,
                                      void *data);
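The new comments above spell out the iterate_devices contract: the callout runs once per contiguous section of each underlying device, accumulates state through *data, and a non-zero return stops the walk. A hedged sketch of a callout that follows that contract (hypothetical helper):

#include <linux/device-mapper.h>

/* Sum the length of every contiguous section; always return 0 so the
 * iteration visits every underlying device. */
static int count_device_sectors(struct dm_target *ti, struct dm_dev *dev,
                                sector_t start, sector_t len, void *data)
{
        sector_t *total = data;

        *total += len;
        return 0;
}

A target would typically drive it as ti->type->iterate_devices(ti, count_device_sectors, &total), which stops early only if the callout ever returns non-zero.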
index f83e17a40e8b848185b649836274b57acdc332c1..99d0fbcbaf79eff04c8ab56531e02aa46e953977 100644 (file)
@@ -90,6 +90,8 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
  *            not set this, then the ftrace infrastructure will add recursion
  *            protection for the caller.
  * STUB   - The ftrace_ops is just a place holder.
+ * INITIALIZED - The ftrace_ops has already been initialized (the first time
+ *            register_ftrace_function() is called, it will initialize the ops)
  */
 enum {
        FTRACE_OPS_FL_ENABLED                   = 1 << 0,
@@ -100,6 +102,7 @@ enum {
        FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED    = 1 << 5,
        FTRACE_OPS_FL_RECURSION_SAFE            = 1 << 6,
        FTRACE_OPS_FL_STUB                      = 1 << 7,
+       FTRACE_OPS_FL_INITIALIZED               = 1 << 8,
 };
 
 struct ftrace_ops {
@@ -110,6 +113,7 @@ struct ftrace_ops {
 #ifdef CONFIG_DYNAMIC_FTRACE
        struct ftrace_hash              *notrace_hash;
        struct ftrace_hash              *filter_hash;
+       struct mutex                    regex_lock;
 #endif
 };
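The new FTRACE_OPS_FL_INITIALIZED flag lets the core initialize per-ops state such as regex_lock exactly once, the first time the ops is registered. A hedged sketch of that one-time setup (hypothetical helper name, assuming CONFIG_DYNAMIC_FTRACE):

#include <linux/ftrace.h>
#include <linux/mutex.h>

static void example_ftrace_ops_init(struct ftrace_ops *ops)
{
        if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
                mutex_init(&ops->regex_lock);   /* guards filter/notrace hash updates */
                ops->flags |= FTRACE_OPS_FL_INITIALIZED;
        }
}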
 
index 34e00fb49becf506865c18ffafefe23c413ea5c4..4372658c73ae5eddd3dd6570d4dfac8c5d8825c8 100644 (file)
@@ -293,6 +293,7 @@ struct ftrace_event_file {
         * caching and such. Which is mostly OK ;-)
         */
        unsigned long           flags;
+       atomic_t                sm_ref; /* soft-mode reference counter */
 };
 
 #define __TRACE_EVENT_FLAGS(name, value)                               \
index af1b86d46f6e714e4feb5aeab303fb76f47a1234..0c48991b0402d0d93110b8a4fa9cd42d615279c2 100644 (file)
@@ -515,7 +515,7 @@ struct hid_device {                                                 /* device report descriptor */
        struct dentry *debug_rdesc;
        struct dentry *debug_events;
        struct list_head debug_list;
-       struct mutex debug_list_lock;
+       spinlock_t  debug_list_lock;
        wait_queue_head_t debug_wait;
 };
 
index 06b0ed0154a4b779e637eaee48ec84cb8818498e..b0dc87a2a376e2583d2d28363a0cdd13f859536c 100644 (file)
@@ -146,6 +146,7 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
 #define IEEE80211_MAX_RTS_THRESHOLD    2353
 #define IEEE80211_MAX_AID              2007
 #define IEEE80211_MAX_TIM_LEN          251
+#define IEEE80211_MAX_MESH_PEERINGS    63
 /* Maximum size for the MA-UNITDATA primitive, 802.11 standard section
    6.2.1.1.2.
 
@@ -1829,6 +1830,15 @@ enum ieee80211_key_len {
        WLAN_KEY_LEN_AES_CMAC = 16,
 };
 
+#define IEEE80211_WEP_IV_LEN           4
+#define IEEE80211_WEP_ICV_LEN          4
+#define IEEE80211_CCMP_HDR_LEN         8
+#define IEEE80211_CCMP_MIC_LEN         8
+#define IEEE80211_CCMP_PN_LEN          6
+#define IEEE80211_TKIP_IV_LEN          8
+#define IEEE80211_TKIP_ICV_LEN         4
+#define IEEE80211_CMAC_PN_LEN          6
+
 /* Public action codes */
 enum ieee80211_pub_actioncode {
        WLAN_PUB_ACTION_EXT_CHANSW_ANN = 4,
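A hedged example of how the new length constants combine: the per-MPDU overhead a driver must reserve for CCMP is the 8-byte header in front of the payload plus the 8-byte MIC behind it, and for TKIP it is the 8-byte IV plus the 4-byte ICV (the macro names below are illustrative only):

#include <linux/ieee80211.h>

#define EXAMPLE_CCMP_OVERHEAD   (IEEE80211_CCMP_HDR_LEN + IEEE80211_CCMP_MIC_LEN)   /* 16 bytes */
#define EXAMPLE_TKIP_OVERHEAD   (IEEE80211_TKIP_IV_LEN + IEEE80211_TKIP_ICV_LEN)    /* 12 bytes */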
index 2b85c521f7375452ed4c0fd54d7cba37b60f4dde..c12916248469f577795d28e96c4f86be747fbf5b 100644 (file)
 #define PCI_DEVICE_ID_TIGON3_5705M_2   0x165e
 #define PCI_DEVICE_ID_NX2_57712                0x1662
 #define PCI_DEVICE_ID_NX2_57712E       0x1663
+#define PCI_DEVICE_ID_NX2_57712_MF     0x1663
 #define PCI_DEVICE_ID_TIGON3_5714      0x1668
 #define PCI_DEVICE_ID_TIGON3_5714S     0x1669
 #define PCI_DEVICE_ID_TIGON3_5780      0x166a
 #define PCI_DEVICE_ID_TIGON3_5780S     0x166b
 #define PCI_DEVICE_ID_TIGON3_5705F     0x166e
+#define PCI_DEVICE_ID_NX2_57712_VF     0x166f
 #define PCI_DEVICE_ID_TIGON3_5754M     0x1672
 #define PCI_DEVICE_ID_TIGON3_5755M     0x1673
 #define PCI_DEVICE_ID_TIGON3_5756      0x1674
 #define PCI_DEVICE_ID_TIGON3_5787      0x169b
 #define PCI_DEVICE_ID_TIGON3_5788      0x169c
 #define PCI_DEVICE_ID_TIGON3_5789      0x169d
+#define PCI_DEVICE_ID_NX2_57840_4_10   0x16a1
+#define PCI_DEVICE_ID_NX2_57840_2_20   0x16a2
+#define PCI_DEVICE_ID_NX2_57840_MF     0x16a4
 #define PCI_DEVICE_ID_NX2_57800_MF     0x16a5
 #define PCI_DEVICE_ID_TIGON3_5702X     0x16a6
 #define PCI_DEVICE_ID_TIGON3_5703X     0x16a7
 #define PCI_DEVICE_ID_TIGON3_5704S     0x16a8
 #define PCI_DEVICE_ID_NX2_57800_VF     0x16a9
 #define PCI_DEVICE_ID_NX2_5706S                0x16aa
-#define PCI_DEVICE_ID_NX2_57840_MF     0x16a4
 #define PCI_DEVICE_ID_NX2_5708S                0x16ac
 #define PCI_DEVICE_ID_NX2_57840_VF     0x16ad
 #define PCI_DEVICE_ID_NX2_57810_MF     0x16ae
diff --git a/include/linux/platform_data/net-cw1200.h b/include/linux/platform_data/net-cw1200.h
new file mode 100644 (file)
index 0000000..c6fbc3c
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef CW1200_PLAT_H_INCLUDED
+#define CW1200_PLAT_H_INCLUDED
+
+struct cw1200_platform_data_spi {
+       u8 spi_bits_per_word;           /* REQUIRED */
+       u16 ref_clk;                    /* REQUIRED (in KHz) */
+
+       /* All others are optional */
+       bool have_5ghz;
+       int reset;                     /* GPIO to RSTn signal (0 disables) */
+       int powerup;                   /* GPIO to POWERUP signal (0 disables) */
+       int (*power_ctrl)(const struct cw1200_platform_data_spi *pdata,
+                         bool enable); /* Control 3v3 / 1v8 supply */
+       int (*clk_ctrl)(const struct cw1200_platform_data_spi *pdata,
+                       bool enable); /* Control CLK32K */
+       const u8 *macaddr;  /* if NULL, use cw1200_mac_template module parameter */
+       const char *sdd_file;  /* if NULL, will use default for detected hw type */
+};
+
+struct cw1200_platform_data_sdio {
+       u16 ref_clk;                    /* REQUIRED (in KHz) */
+
+       /* All others are optional */
+       bool have_5ghz;
+       bool no_nptb;       /* SDIO hardware does not support non-power-of-2-blocksizes */
+       int reset;          /* GPIO to RSTn signal (0 disables) */
+       int powerup;        /* GPIO to POWERUP signal (0 disables) */
+       int irq;            /* IRQ line or 0 to use SDIO IRQ */
+       int (*power_ctrl)(const struct cw1200_platform_data_sdio *pdata,
+                         bool enable); /* Control 3v3 / 1v8 supply */
+       int (*clk_ctrl)(const struct cw1200_platform_data_sdio *pdata,
+                       bool enable); /* Control CLK32K */
+       const u8 *macaddr;  /* if NULL, use cw1200_mac_template module parameter */
+       const char *sdd_file;  /* if NULL, will use default for detected hw type */
+};
+
+
+/* An example of SPI support in your board setup file:
+
+   static struct cw1200_platform_data_spi cw1200_platform_data = {
+       .ref_clk = 38400,
+       .spi_bits_per_word = 16,
+       .reset = GPIO_RF_RESET,
+       .powerup = GPIO_RF_POWERUP,
+       .macaddr = wifi_mac_addr,
+       .sdd_file = "sdd_sagrad_1091_1098.bin",
+  };
+  static struct spi_board_info myboard_spi_devices[] __initdata = {
+       {
+               .modalias = "cw1200_wlan_spi",
+               .max_speed_hz = 52000000,
+               .bus_num = 0,
+               .irq = WIFI_IRQ,
+               .platform_data = &cw1200_platform_data,
+               .chip_select = 0,
+       },
+  };
+
+ */
+
+/* An example of SDIO support in your board setup file:
+
+  static struct cw1200_platform_data_sdio my_cw1200_platform_data = {
+       .ref_clk = 38400,
+       .have_5ghz = false,
+       .sdd_file = "sdd_myplatform.bin",
+  };
+  cw1200_sdio_set_platform_data(&my_cw1200_platform_data);
+
+ */
+
+void __init cw1200_sdio_set_platform_data(struct cw1200_platform_data_sdio *pdata);
+
+#endif /* CW1200_PLAT_H_INCLUDED */
index caa8f4d0186b742c3effc99c296ac99724f2f4d1..178a8d909f14a3dcdcbc0ce255572975c8b3b221 100644 (file)
@@ -593,6 +593,7 @@ struct signal_struct {
 #endif
 #ifdef CONFIG_AUDIT
        unsigned audit_tty;
+       unsigned audit_tty_log_passwd;
        struct tty_audit_buf *tty_audit_buf;
 #endif
 #ifdef CONFIG_CGROUPS
index afe79d40a99eaf36dd4eea67c5a0234a40eff683..6535e4718fdebeee689a1d214da4ca39f060f437 100644 (file)
@@ -20,6 +20,18 @@ struct ssb_pflash {
        u32 window_size;
 };
 
+#ifdef CONFIG_SSB_SFLASH
+struct ssb_sflash {
+       bool present;
+       u32 window;
+       u32 blocksize;
+       u16 numblocks;
+       u32 size;
+
+       void *priv;
+};
+#endif
+
 struct ssb_mipscore {
        struct ssb_device *dev;
 
@@ -27,6 +39,9 @@ struct ssb_mipscore {
        struct ssb_serial_port serial_ports[4];
 
        struct ssb_pflash pflash;
+#ifdef CONFIG_SSB_SFLASH
+       struct ssb_sflash sflash;
+#endif
 };
 
 extern void ssb_mipscore_init(struct ssb_mipscore *mcore);
index 3a7256955b10b75a03fe6873f197a5615a44b0a7..f9f931c89e3e2deeda4da48138b6237db968b9c9 100644 (file)
 #define SSB_SPROMSIZE_WORDS_R4         220
 #define SSB_SPROMSIZE_BYTES_R123       (SSB_SPROMSIZE_WORDS_R123 * sizeof(u16))
 #define SSB_SPROMSIZE_BYTES_R4         (SSB_SPROMSIZE_WORDS_R4 * sizeof(u16))
+#define SSB_SPROMSIZE_WORDS_R10                230
 #define SSB_SPROM_BASE1                        0x1000
 #define SSB_SPROM_BASE31               0x0800
 #define SSB_SPROM_REVISION             0x007E
index 7e92bd86a808cfbdf31bf38f1165ab30fe93c2ff..8780bd2a272ab6672c8f6c32340b7d1ab2831acb 100644 (file)
@@ -575,8 +575,7 @@ extern void tty_audit_exit(void);
 extern void tty_audit_fork(struct signal_struct *sig);
 extern void tty_audit_tiocsti(struct tty_struct *tty, char ch);
 extern void tty_audit_push(struct tty_struct *tty);
-extern int tty_audit_push_task(struct task_struct *tsk,
-                              kuid_t loginuid, u32 sessionid);
+extern int tty_audit_push_current(void);
 #else
 static inline void tty_audit_add_data(struct tty_struct *tty,
                unsigned char *data, size_t size, unsigned icanon)
@@ -594,8 +593,7 @@ static inline void tty_audit_fork(struct signal_struct *sig)
 static inline void tty_audit_push(struct tty_struct *tty)
 {
 }
-static inline int tty_audit_push_task(struct task_struct *tsk,
-                                     kuid_t loginuid, u32 sessionid)
+static inline int tty_audit_push_current(void)
 {
        return 0;
 }
index e0512aaef4b85df8d05c7f91983f1054ee6a7628..3c592cf473dae89f5fb529bcfdeb445612604622 100644 (file)
@@ -107,7 +107,6 @@ enum {
        HCI_MGMT,
        HCI_PAIRABLE,
        HCI_SERVICE_CACHE,
-       HCI_LINK_KEYS,
        HCI_DEBUG_KEYS,
        HCI_UNREGISTER,
 
index 7cb6d360d14702f04bebc0a2ad88a9466b33a607..f77885ea78c284dd724a63b131e31346ad4308f7 100644 (file)
@@ -117,13 +117,6 @@ struct oob_data {
        u8 randomizer[16];
 };
 
-struct le_scan_params {
-       u8 type;
-       u16 interval;
-       u16 window;
-       int timeout;
-};
-
 #define HCI_MAX_SHORT_NAME_LENGTH      10
 
 struct amp_assoc {
@@ -283,9 +276,6 @@ struct hci_dev {
 
        struct delayed_work     le_scan_disable;
 
-       struct work_struct      le_scan;
-       struct le_scan_params   le_scan_params;
-
        __s8                    adv_tx_power;
        __u8                    adv_data[HCI_MAX_AD_LENGTH];
        __u8                    adv_data_len;
@@ -432,6 +422,7 @@ void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie);
 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp);
+void hci_inquiry_cache_flush(struct hci_dev *hdev);
 
 /* ----- HCI Connections ----- */
 enum {
@@ -1114,6 +1105,16 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event);
                                         BIT(BDADDR_LE_PUBLIC) | \
                                         BIT(BDADDR_LE_RANDOM))
 
+/* These LE scan and inquiry parameters were chosen according to LE General
+ * Discovery Procedure specification.
+ */
+#define DISCOV_LE_SCAN_WIN             0x12
+#define DISCOV_LE_SCAN_INT             0x12
+#define DISCOV_LE_TIMEOUT              msecs_to_jiffies(10240)
+#define DISCOV_INTERLEAVED_TIMEOUT     msecs_to_jiffies(5120)
+#define DISCOV_INTERLEAVED_INQUIRY_LEN 0x04
+#define DISCOV_BREDR_INQUIRY_LEN       0x08
+
 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
 int mgmt_index_added(struct hci_dev *hdev);
 int mgmt_index_removed(struct hci_dev *hdev);
@@ -1169,10 +1170,7 @@ int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                      u8 ssp, u8 *eir, u16 eir_len);
 int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                     u8 addr_type, s8 rssi, u8 *name, u8 name_len);
-int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status);
-int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status);
 int mgmt_discovering(struct hci_dev *hdev, u8 discovering);
-int mgmt_interleaved_discovery(struct hci_dev *hdev);
 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
 bool mgmt_valid_hdev(struct hci_dev *hdev);
@@ -1212,11 +1210,6 @@ void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
                                        u16 latency, u16 to_multiplier);
 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
                                                        __u8 ltk[16]);
-int hci_do_inquiry(struct hci_dev *hdev, u8 length);
-int hci_cancel_inquiry(struct hci_dev *hdev);
-int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
-               int timeout);
-int hci_cancel_le_scan(struct hci_dev *hdev);
 
 u8 bdaddr_to_le(u8 bdaddr_type);
 
index fb94cf13c777109e5d9c43b55758ec1ab275aa2e..1a966afbbfa8408d159503f711f00855e0957d4b 100644 (file)
@@ -242,7 +242,7 @@ struct l2cap_conn_rsp {
 #define L2CAP_CID_SIGNALING    0x0001
 #define L2CAP_CID_CONN_LESS    0x0002
 #define L2CAP_CID_A2MP         0x0003
-#define L2CAP_CID_LE_DATA      0x0004
+#define L2CAP_CID_ATT          0x0004
 #define L2CAP_CID_LE_SIGNALING 0x0005
 #define L2CAP_CID_SMP          0x0006
 #define L2CAP_CID_DYN_START    0x0040
index 26b5b692c22bc93cbaa42f8cf6b4d9951d49c582..7b0730aeb892621abc6419ec9c345b141e184b71 100644 (file)
@@ -188,6 +188,8 @@ struct ieee80211_channel {
  *     when used with 802.11g (on the 2.4 GHz band); filled by the
  *     core code when registering the wiphy.
  * @IEEE80211_RATE_ERP_G: This is an ERP rate in 802.11g mode.
+ * @IEEE80211_RATE_SUPPORTS_5MHZ: Rate can be used in 5 MHz mode
+ * @IEEE80211_RATE_SUPPORTS_10MHZ: Rate can be used in 10 MHz mode
  */
 enum ieee80211_rate_flags {
        IEEE80211_RATE_SHORT_PREAMBLE   = 1<<0,
@@ -195,6 +197,8 @@ enum ieee80211_rate_flags {
        IEEE80211_RATE_MANDATORY_B      = 1<<2,
        IEEE80211_RATE_MANDATORY_G      = 1<<3,
        IEEE80211_RATE_ERP_G            = 1<<4,
+       IEEE80211_RATE_SUPPORTS_5MHZ    = 1<<5,
+       IEEE80211_RATE_SUPPORTS_10MHZ   = 1<<6,
 };
 
 /**
@@ -432,6 +436,30 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
                             const struct cfg80211_chan_def *chandef,
                             u32 prohibited_flags);
 
+/**
+ * ieee80211_chandef_rate_flags - returns rate flags for a channel
+ *
+ * In some channel types, not all rates may be used - for example CCK
+ * rates may not be used in 5/10 MHz channels.
+ *
+ * @chandef: channel definition for the channel
+ *
+ * Returns: rate flags which apply for this channel
+ */
+static inline enum ieee80211_rate_flags
+ieee80211_chandef_rate_flags(struct cfg80211_chan_def *chandef)
+{
+       switch (chandef->width) {
+       case NL80211_CHAN_WIDTH_5:
+               return IEEE80211_RATE_SUPPORTS_5MHZ;
+       case NL80211_CHAN_WIDTH_10:
+               return IEEE80211_RATE_SUPPORTS_10MHZ;
+       default:
+               break;
+       }
+       return 0;
+}
+
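A hedged usage sketch for the helper added above (hypothetical function name): callers are expected to mask each bitrate's flags against the flags required for the configured channel width and skip rates that do not carry all of them.

#include <net/cfg80211.h>

static bool rate_usable_on_chandef(const struct ieee80211_rate *rate,
                                   struct cfg80211_chan_def *chandef)
{
        enum ieee80211_rate_flags required =
                ieee80211_chandef_rate_flags(chandef);

        return (rate->flags & required) == required;
}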
 /**
  * enum survey_info_flags - survey information flags
  *
@@ -753,6 +781,8 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
  * @STATION_INFO_LOCAL_PM: @local_pm filled
  * @STATION_INFO_PEER_PM: @peer_pm filled
  * @STATION_INFO_NONPEER_PM: @nonpeer_pm filled
+ * @STATION_INFO_CHAIN_SIGNAL: @chain_signal filled
+ * @STATION_INFO_CHAIN_SIGNAL_AVG: @chain_signal_avg filled
  */
 enum station_info_flags {
        STATION_INFO_INACTIVE_TIME      = 1<<0,
@@ -781,6 +811,8 @@ enum station_info_flags {
        STATION_INFO_NONPEER_PM         = 1<<23,
        STATION_INFO_RX_BYTES64         = 1<<24,
        STATION_INFO_TX_BYTES64         = 1<<25,
+       STATION_INFO_CHAIN_SIGNAL       = 1<<26,
+       STATION_INFO_CHAIN_SIGNAL_AVG   = 1<<27,
 };
 
 /**
@@ -857,6 +889,8 @@ struct sta_bss_parameters {
        u16 beacon_interval;
 };
 
+#define IEEE80211_MAX_CHAINS   4
+
 /**
  * struct station_info - station information
  *
@@ -874,6 +908,9 @@ struct sta_bss_parameters {
  *     For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_.
  * @signal_avg: Average signal strength, type depends on the wiphy's signal_type.
  *     For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_.
+ * @chains: bitmask for filled values in @chain_signal, @chain_signal_avg
+ * @chain_signal: per-chain signal strength of last received packet in dBm
+ * @chain_signal_avg: per-chain signal strength average in dBm
  * @txrate: current unicast bitrate from this station
  * @rxrate: current unicast bitrate to this station
  * @rx_packets: packets received from this station
@@ -909,6 +946,11 @@ struct station_info {
        u8 plink_state;
        s8 signal;
        s8 signal_avg;
+
+       u8 chains;
+       s8 chain_signal[IEEE80211_MAX_CHAINS];
+       s8 chain_signal_avg[IEEE80211_MAX_CHAINS];
+
        struct rate_info txrate;
        struct rate_info rxrate;
        u32 rx_packets;
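A hedged sketch (hypothetical driver helper) of filling the new per-chain fields: set one bit in @chains for every valid entry written to @chain_signal and mark the data as present in @filled.

#include <linux/bitops.h>
#include <net/cfg80211.h>

static void fill_chain_signal(struct station_info *sinfo,
                              const s8 *rssi_per_chain, int nr_chains)
{
        int i;

        sinfo->filled |= STATION_INFO_CHAIN_SIGNAL;
        sinfo->chains = 0;
        for (i = 0; i < nr_chains && i < IEEE80211_MAX_CHAINS; i++) {
                sinfo->chains |= BIT(i);
                sinfo->chain_signal[i] = rssi_per_chain[i];
        }
}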
@@ -947,6 +989,7 @@ struct station_info {
  * @MONITOR_FLAG_CONTROL: pass control frames
  * @MONITOR_FLAG_OTHER_BSS: disable BSSID filtering
  * @MONITOR_FLAG_COOK_FRAMES: report frames after processing
+ * @MONITOR_FLAG_ACTIVE: active monitor, ACKs frames on its MAC address
  */
 enum monitor_flags {
        MONITOR_FLAG_FCSFAIL            = 1<<NL80211_MNTR_FLAG_FCSFAIL,
@@ -954,6 +997,7 @@ enum monitor_flags {
        MONITOR_FLAG_CONTROL            = 1<<NL80211_MNTR_FLAG_CONTROL,
        MONITOR_FLAG_OTHER_BSS          = 1<<NL80211_MNTR_FLAG_OTHER_BSS,
        MONITOR_FLAG_COOK_FRAMES        = 1<<NL80211_MNTR_FLAG_COOK_FRAMES,
+       MONITOR_FLAG_ACTIVE             = 1<<NL80211_MNTR_FLAG_ACTIVE,
 };
 
 /**
@@ -1108,6 +1152,9 @@ struct bss_parameters {
  *     setting for new peer links.
  * @dot11MeshAwakeWindowDuration: The duration in TUs the STA will remain awake
  *     after transmitting its beacon.
+ * @plink_timeout: If no tx activity is seen from a STA we've established
+ *     peering with for longer than this time (in seconds), then remove it
+ *     from the STA's list of peers.  Default is 30 minutes.
  */
 struct mesh_config {
        u16 dot11MeshRetryTimeout;
@@ -1137,6 +1184,7 @@ struct mesh_config {
        u16 dot11MeshHWMPconfirmationInterval;
        enum nl80211_mesh_power_mode power_mode;
        u16 dot11MeshAwakeWindowDuration;
+       u32 plink_timeout;
 };
 
 /**
@@ -1147,6 +1195,7 @@ struct mesh_config {
  * @sync_method: which synchronization method to use
  * @path_sel_proto: which path selection protocol to use
  * @path_metric: which metric to use
+ * @auth_id: which authentication method this mesh is using
  * @ie: vendor information elements (optional)
  * @ie_len: length of vendor information elements
  * @is_authenticated: this mesh requires authentication
@@ -1155,6 +1204,7 @@ struct mesh_config {
  * @dtim_period: DTIM period to use
  * @beacon_interval: beacon interval to use
  * @mcast_rate: multicast rate for Mesh Node [6Mbps is the default for 802.11a]
+ * @basic_rates: basic rates to use when creating the mesh
  *
  * These parameters are fixed when the mesh is created.
  */
@@ -1165,6 +1215,7 @@ struct mesh_setup {
        u8 sync_method;
        u8 path_sel_proto;
        u8 path_metric;
+       u8 auth_id;
        const u8 *ie;
        u8 ie_len;
        bool is_authenticated;
@@ -1173,6 +1224,7 @@ struct mesh_setup {
        u8 dtim_period;
        u16 beacon_interval;
        int mcast_rate[IEEE80211_NUM_BANDS];
+       u32 basic_rates;
 };
 
 /**
@@ -1241,6 +1293,7 @@ struct cfg80211_ssid {
  * @scan_start: time (in jiffies) when the scan started
  * @wdev: the wireless device to scan for
  * @aborted: (internal) scan request was notified as aborted
+ * @notified: (internal) scan request was notified as done or aborted
  * @no_cck: used to send probe requests at non CCK rate in 2GHz band
  */
 struct cfg80211_scan_request {
@@ -1258,7 +1311,7 @@ struct cfg80211_scan_request {
        /* internal */
        struct wiphy *wiphy;
        unsigned long scan_start;
-       bool aborted;
+       bool aborted, notified;
        bool no_cck;
 
        /* keep last */
@@ -1406,7 +1459,8 @@ const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 ie);
  * This structure provides information needed to complete IEEE 802.11
  * authentication.
  *
- * @bss: The BSS to authenticate with.
+ * @bss: The BSS to authenticate with, the callee must obtain a reference
+ *     to it if it needs to keep it.
  * @auth_type: Authentication type (algorithm)
  * @ie: Extra IEs to add to Authentication frame or %NULL
  * @ie_len: Length of ie buffer in octets
@@ -1444,11 +1498,10 @@ enum cfg80211_assoc_req_flags {
  *
  * This structure provides information needed to complete IEEE 802.11
  * (re)association.
- * @bss: The BSS to associate with. If the call is successful the driver
- *     is given a reference that it must release, normally via a call to
- *     cfg80211_send_rx_assoc(), or, if association timed out, with a
- *     call to cfg80211_put_bss() (in addition to calling
- *     cfg80211_send_assoc_timeout())
+ * @bss: The BSS to associate with. If the call is successful the driver is
+ *     given a reference that it must give back to cfg80211_send_rx_assoc()
+ *     or to cfg80211_assoc_timeout(). To ensure proper refcounting, new
+ *     association requests while already associating must be rejected.
  * @ie: Extra IEs to add to (Re)Association Request frame or %NULL
  * @ie_len: Length of ie buffer in octets
  * @use_mfp: Use management frame protection (IEEE 802.11w) in this association
@@ -1850,7 +1903,9 @@ struct cfg80211_update_ft_ies_params {
  * @get_mpath: get a mesh path for the given parameters
  * @dump_mpath: dump mesh path callback -- resume dump at index @idx
  * @join_mesh: join the mesh network with the specified parameters
+ *     (invoked with the wireless_dev mutex held)
  * @leave_mesh: leave the current mesh network
+ *     (invoked with the wireless_dev mutex held)
  *
  * @get_mesh_config: Get the current mesh configuration
  *
@@ -1877,20 +1932,28 @@ struct cfg80211_update_ft_ies_params {
  *     the scan/scan_done bracket too.
  *
  * @auth: Request to authenticate with the specified peer
+ *     (invoked with the wireless_dev mutex held)
  * @assoc: Request to (re)associate with the specified peer
+ *     (invoked with the wireless_dev mutex held)
  * @deauth: Request to deauthenticate from the specified peer
+ *     (invoked with the wireless_dev mutex held)
  * @disassoc: Request to disassociate from the specified peer
+ *     (invoked with the wireless_dev mutex held)
  *
  * @connect: Connect to the ESS with the specified parameters. When connected,
  *     call cfg80211_connect_result() with status code %WLAN_STATUS_SUCCESS.
  *     If the connection fails for some reason, call cfg80211_connect_result()
  *     with the status from the AP.
+ *     (invoked with the wireless_dev mutex held)
  * @disconnect: Disconnect from the BSS/ESS.
+ *     (invoked with the wireless_dev mutex held)
  *
  * @join_ibss: Join the specified IBSS (or create if necessary). Once done, call
  *     cfg80211_ibss_joined(), also call that function when changing BSSID due
  *     to a merge.
+ *     (invoked with the wireless_dev mutex held)
  * @leave_ibss: Leave the IBSS.
+ *     (invoked with the wireless_dev mutex held)
  *
  * @set_mcast_rate: Set the specified multicast rate (only if vif is in ADHOC or
  *     MESH mode)
@@ -2307,6 +2370,7 @@ struct cfg80211_ops {
  *     responds to probe-requests in hardware.
  * @WIPHY_FLAG_OFFCHAN_TX: Device supports direct off-channel TX.
  * @WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL: Device supports remain-on-channel call.
+ * @WIPHY_FLAG_SUPPORTS_5_10_MHZ: Device supports 5 MHz and 10 MHz channels.
  */
 enum wiphy_flags {
        WIPHY_FLAG_CUSTOM_REGULATORY            = BIT(0),
@@ -2330,6 +2394,7 @@ enum wiphy_flags {
        WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD        = BIT(19),
        WIPHY_FLAG_OFFCHAN_TX                   = BIT(20),
        WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL        = BIT(21),
+       WIPHY_FLAG_SUPPORTS_5_10_MHZ            = BIT(22),
 };
 
 /**
@@ -2556,6 +2621,9 @@ struct wiphy_wowlan_support {
  *     may request, if implemented.
  *
  * @wowlan: WoWLAN support information
+ * @wowlan_config: current WoWLAN configuration; this should usually not be
+ *     used since access to it is necessarily racy, use the parameter passed
+ *     to the suspend() operation instead.
  *
  * @ap_sme_capa: AP SME capabilities, flags from &enum nl80211_ap_sme_features.
  * @ht_capa_mod_mask:  Specify what ht_cap values can be over-ridden.
@@ -2622,7 +2690,8 @@ struct wiphy {
        u32 hw_version;
 
 #ifdef CONFIG_PM
-       struct wiphy_wowlan_support wowlan;
+       const struct wiphy_wowlan_support *wowlan;
+       struct cfg80211_wowlan *wowlan_config;
 #endif
 
        u16 max_remain_on_channel_duration;
@@ -2820,7 +2889,7 @@ struct cfg80211_cached_keys;
  * @current_bss: (private) Used by the internal configuration code
  * @channel: (private) Used by the internal configuration code to track
  *     the user-set AP, monitor and WDS channel
- * @preset_chan: (private) Used by the internal configuration code to
+ * @preset_chandef: (private) Used by the internal configuration code to
  *     track the channel to be used for AP later
  * @bssid: (private) Used by the internal configuration code
  * @ssid: (private) Used by the internal configuration code
@@ -2834,14 +2903,23 @@ struct cfg80211_cached_keys;
  *     by cfg80211 on change_interface
  * @mgmt_registrations: list of registrations for management frames
  * @mgmt_registrations_lock: lock for the list
- * @mtx: mutex used to lock data in this struct
- * @cleanup_work: work struct used for cleanup that can't be done directly
+ * @mtx: mutex used to lock data in this struct; it may be used by drivers,
+ *     and some API functions require it to be held
  * @beacon_interval: beacon interval used on this device for transmitting
  *     beacons, 0 when not valid
  * @address: The address for this device, valid only if @netdev is %NULL
  * @p2p_started: true if this is a P2P Device that has been started
  * @cac_started: true if DFS channel availability check has been started
  * @cac_start_time: timestamp (jiffies) when the dfs state was entered.
+ * @ps: powersave mode is enabled
+ * @ps_timeout: dynamic powersave timeout
+ * @ap_unexpected_nlportid: (private) netlink port ID of application
+ *     registered for unexpected class 3 frames (AP mode)
+ * @conn: (private) cfg80211 software SME connection state machine data
+ * @connect_keys: (private) keys to set after connection is established
+ * @ibss_fixed: (private) IBSS is using fixed BSSID
+ * @event_list: (private) list for internal event processing
+ * @event_lock: (private) lock for event list
  */
 struct wireless_dev {
        struct wiphy *wiphy;
@@ -2858,8 +2936,6 @@ struct wireless_dev {
 
        struct mutex mtx;
 
-       struct work_struct cleanup_work;
-
        bool use_4addr, p2p_started;
 
        u8 address[ETH_ALEN] __aligned(sizeof(u16));
@@ -2867,11 +2943,6 @@ struct wireless_dev {
        /* currently used for IBSS and SME - might be rearranged later */
        u8 ssid[IEEE80211_MAX_SSID_LEN];
        u8 ssid_len, mesh_id_len, mesh_id_up_len;
-       enum {
-               CFG80211_SME_IDLE,
-               CFG80211_SME_CONNECTING,
-               CFG80211_SME_CONNECTED,
-       } sme_state;
        struct cfg80211_conn *conn;
        struct cfg80211_cached_keys *connect_keys;
 
@@ -2989,6 +3060,15 @@ struct ieee80211_rate *
 ieee80211_get_response_rate(struct ieee80211_supported_band *sband,
                            u32 basic_rates, int bitrate);
 
+/**
+ * ieee80211_mandatory_rates - get mandatory rates for a given band
+ * @sband: the band to look for rates in
+ *
+ * This function returns a bitmap of the mandatory rates for the given
+ * band; bits are set according to the rate position in the bitrates array.
+ */
+u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband);
+
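
A short usage sketch (hypothetical caller) for the new helper; each set bit in the returned bitmap indexes into sband->bitrates:

	struct ieee80211_supported_band *sband =
		wiphy->bands[IEEE80211_BAND_2GHZ];
	u32 mandatory = ieee80211_mandatory_rates(sband);
	int i;

	for (i = 0; i < sband->n_bitrates; i++)
		if (mandatory & BIT(i))
			pr_debug("mandatory rate: %d x 100 kbps\n",
				 sband->bitrates[i].bitrate);
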
 /*
  * Radiotap parsing functions -- for controlled injection support
  *
@@ -3392,122 +3472,87 @@ void cfg80211_put_bss(struct wiphy *wiphy, struct cfg80211_bss *bss);
 void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *bss);
 
 /**
- * cfg80211_send_rx_auth - notification of processed authentication
+ * cfg80211_rx_mlme_mgmt - notification of processed MLME management frame
  * @dev: network device
  * @buf: authentication frame (header + body)
  * @len: length of the frame data
  *
- * This function is called whenever an authentication has been processed in
- * station mode. The driver is required to call either this function or
- * cfg80211_send_auth_timeout() to indicate the result of cfg80211_ops::auth()
- * call. This function may sleep.
+ * This function is called whenever an authentication, disassociation or
+ * deauthentication frame has been received and processed in station mode.
+ * After being asked to authenticate via cfg80211_ops::auth() the driver must
+ * call either this function or cfg80211_auth_timeout().
+ * After being asked to associate via cfg80211_ops::assoc() the driver must
+ * call either cfg80211_rx_assoc_resp() or cfg80211_assoc_timeout().
+ * While connected, the driver must call this for received and processed
+ * disassociation and deauthentication frames. If the frame couldn't be used
+ * because it was unprotected, the driver must call the function
+ * cfg80211_rx_unprot_mlme_mgmt() instead.
+ *
+ * This function may sleep. The caller must hold the corresponding wdev's mutex.
  */
-void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len);
+void cfg80211_rx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len);
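
A sketch (hypothetical driver notification path; buf/len come from the driver's RX handling) of the locking contract the new comment spells out; the wdev mutex is the @mtx member documented above:

	struct wireless_dev *wdev = dev->ieee80211_ptr;

	/* auth/deauth/disassoc frame already received and processed */
	mutex_lock(&wdev->mtx);
	cfg80211_rx_mlme_mgmt(dev, buf, len);
	mutex_unlock(&wdev->mtx);
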
 
 /**
- * cfg80211_send_auth_timeout - notification of timed out authentication
+ * cfg80211_auth_timeout - notification of timed out authentication
  * @dev: network device
  * @addr: The MAC address of the device with which the authentication timed out
  *
- * This function may sleep.
+ * This function may sleep. The caller must hold the corresponding wdev's
+ * mutex.
  */
-void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr);
+void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr);
 
 /**
- * cfg80211_send_rx_assoc - notification of processed association
+ * cfg80211_rx_assoc_resp - notification of processed association response
  * @dev: network device
- * @bss: the BSS struct association was requested for, the struct reference
- *     is owned by cfg80211 after this call
- * @buf: (re)association response frame (header + body)
+ * @bss: the BSS that association was requested with; ownership of the pointer
+ *     moves to cfg80211 in this call
+ * @buf: (re)association response frame (header + body)
  * @len: length of the frame data
  *
- * This function is called whenever a (re)association response has been
- * processed in station mode. The driver is required to call either this
- * function or cfg80211_send_assoc_timeout() to indicate the result of
- * cfg80211_ops::assoc() call. This function may sleep.
+ * After being asked to associate via cfg80211_ops::assoc() the driver must
+ * call either this function or cfg80211_assoc_timeout().
+ *
+ * This function may sleep. The caller must hold the corresponding wdev's mutex.
  */
-void cfg80211_send_rx_assoc(struct net_device *dev, struct cfg80211_bss *bss,
+void cfg80211_rx_assoc_resp(struct net_device *dev,
+                           struct cfg80211_bss *bss,
                            const u8 *buf, size_t len);
 
 /**
- * cfg80211_send_assoc_timeout - notification of timed out association
+ * cfg80211_assoc_timeout - notification of timed out association
  * @dev: network device
- * @addr: The MAC address of the device with which the association timed out
+ * @bss: The BSS entry with which association timed out.
  *
- * This function may sleep.
+ * This function may sleep. The caller must hold the corresponding wdev's mutex.
  */
-void cfg80211_send_assoc_timeout(struct net_device *dev, const u8 *addr);
+void cfg80211_assoc_timeout(struct net_device *dev, struct cfg80211_bss *bss);
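
A before/after sketch (hypothetical caller) of the signature change; the timeout notification now takes the BSS entry the association was attempted with, instead of just its address:

	/* old API: cfg80211_send_assoc_timeout(dev, bss->bssid); */
	cfg80211_assoc_timeout(dev, bss);	/* bss: struct cfg80211_bss * */
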
 
 /**
- * cfg80211_send_deauth - notification of processed deauthentication
+ * cfg80211_tx_mlme_mgmt - notification of transmitted deauth/disassoc frame
  * @dev: network device
- * @buf: deauthentication frame (header + body)
+ * @buf: 802.11 frame (header + body)
  * @len: length of the frame data
  *
  * This function is called whenever deauthentication has been processed in
  * station mode. This includes both received deauthentication frames and
- * locally generated ones. This function may sleep.
- */
-void cfg80211_send_deauth(struct net_device *dev, const u8 *buf, size_t len);
-
-/**
- * __cfg80211_send_deauth - notification of processed deauthentication
- * @dev: network device
- * @buf: deauthentication frame (header + body)
- * @len: length of the frame data
- *
- * Like cfg80211_send_deauth(), but doesn't take the wdev lock.
- */
-void __cfg80211_send_deauth(struct net_device *dev, const u8 *buf, size_t len);
-
-/**
- * cfg80211_send_disassoc - notification of processed disassociation
- * @dev: network device
- * @buf: disassociation response frame (header + body)
- * @len: length of the frame data
- *
- * This function is called whenever disassociation has been processed in
- * station mode. This includes both received disassociation frames and locally
- * generated ones. This function may sleep.
- */
-void cfg80211_send_disassoc(struct net_device *dev, const u8 *buf, size_t len);
-
-/**
- * __cfg80211_send_disassoc - notification of processed disassociation
- * @dev: network device
- * @buf: disassociation response frame (header + body)
- * @len: length of the frame data
- *
- * Like cfg80211_send_disassoc(), but doesn't take the wdev lock.
+ * locally generated ones. This function may sleep. The caller must hold the
+ * corresponding wdev's mutex.
  */
-void __cfg80211_send_disassoc(struct net_device *dev, const u8 *buf,
-       size_t len);
+void cfg80211_tx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len);
 
 /**
- * cfg80211_send_unprot_deauth - notification of unprotected deauthentication
+ * cfg80211_rx_unprot_mlme_mgmt - notification of unprotected mlme mgmt frame
  * @dev: network device
  * @buf: deauthentication frame (header + body)
  * @len: length of the frame data
  *
- * This function is called whenever a received Deauthentication frame has been
- * dropped in station mode because of MFP being used but the Deauthentication
+ * This function is called whenever a received deauthentication or disassociation
+ * frame has been dropped in station mode because of MFP being used but the
  * frame was not protected. This function may sleep.
  */
-void cfg80211_send_unprot_deauth(struct net_device *dev, const u8 *buf,
-                                size_t len);
-
-/**
- * cfg80211_send_unprot_disassoc - notification of unprotected disassociation
- * @dev: network device
- * @buf: disassociation frame (header + body)
- * @len: length of the frame data
- *
- * This function is called whenever a received Disassociation frame has been
- * dropped in station mode because of MFP being used but the Disassociation
- * frame was not protected. This function may sleep.
- */
-void cfg80211_send_unprot_disassoc(struct net_device *dev, const u8 *buf,
-                                  size_t len);
+void cfg80211_rx_unprot_mlme_mgmt(struct net_device *dev,
+                                 const u8 *buf, size_t len);
 
 /**
  * cfg80211_michael_mic_failure - notification of Michael MIC failure (TKIP)
@@ -4153,6 +4198,7 @@ void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev,
  * cfg80211_crit_proto_stopped() - indicate critical protocol stopped by driver.
  *
  * @wdev: the wireless device for which critical protocol is stopped.
+ * @gfp: allocation flags
  *
  * This function can be called by the driver to indicate it has reverted
  * operation back to normal. One reason could be that the duration given
index c3999632e616189a81d257dfd68a16ee80f3ce3b..c6d07cb074bc3ed04ec8ba76e417248814196e8c 100644 (file)
@@ -269,6 +269,7 @@ enum ieee80211_radiotap_type {
 #define IEEE80211_RADIOTAP_MCS_HAVE_GI         0x04
 #define IEEE80211_RADIOTAP_MCS_HAVE_FMT                0x08
 #define IEEE80211_RADIOTAP_MCS_HAVE_FEC                0x10
+#define IEEE80211_RADIOTAP_MCS_HAVE_STBC       0x20
 
 #define IEEE80211_RADIOTAP_MCS_BW_MASK         0x03
 #define                IEEE80211_RADIOTAP_MCS_BW_20    0
@@ -278,6 +279,12 @@ enum ieee80211_radiotap_type {
 #define IEEE80211_RADIOTAP_MCS_SGI             0x04
 #define IEEE80211_RADIOTAP_MCS_FMT_GF          0x08
 #define IEEE80211_RADIOTAP_MCS_FEC_LDPC                0x10
+#define IEEE80211_RADIOTAP_MCS_STBC_MASK       0x60
+#define                IEEE80211_RADIOTAP_MCS_STBC_1   1
+#define                IEEE80211_RADIOTAP_MCS_STBC_2   2
+#define                IEEE80211_RADIOTAP_MCS_STBC_3   3
+
+#define IEEE80211_RADIOTAP_MCS_STBC_SHIFT      5
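
A sketch (hypothetical radiotap header construction) of how the new STBC known/flags bits are intended to be filled; stbc_streams is assumed to be the number of STBC streams (1-3) reported by the hardware:

	u8 mcs_known = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
		       IEEE80211_RADIOTAP_MCS_HAVE_STBC;
	u8 mcs_flags = 0;

	if (stbc_streams)
		mcs_flags |= stbc_streams << IEEE80211_RADIOTAP_MCS_STBC_SHIFT;
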
 
 /* For IEEE80211_RADIOTAP_AMPDU_STATUS */
 #define IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN                0x0001
index 885898a40d13530504ad41e489b9d58a6b233366..5b7a3dadaddec1b7cabb919a1cf9919b90ff638d 100644 (file)
@@ -217,8 +217,8 @@ struct ieee80211_chanctx_conf {
  * @BSS_CHANGED_TXPOWER: TX power setting changed for this interface
  * @BSS_CHANGED_P2P_PS: P2P powersave settings (CTWindow, opportunistic PS)
  *     changed (currently only in P2P client mode, GO mode will be later)
- * @BSS_CHANGED_DTIM_PERIOD: the DTIM period value was changed (set when
- *     it becomes valid, managed mode only)
+ * @BSS_CHANGED_BEACON_INFO: Data from the AP's beacon became available:
+ *     currently only the dtim_period is under consideration.
  * @BSS_CHANGED_BANDWIDTH: The bandwidth used by this interface changed,
  *     note that this is only called when it changes after the channel
  *     context had been assigned.
@@ -244,7 +244,7 @@ enum ieee80211_bss_change {
        BSS_CHANGED_PS                  = 1<<17,
        BSS_CHANGED_TXPOWER             = 1<<18,
        BSS_CHANGED_P2P_PS              = 1<<19,
-       BSS_CHANGED_DTIM_PERIOD         = 1<<20,
+       BSS_CHANGED_BEACON_INFO         = 1<<20,
        BSS_CHANGED_BANDWIDTH           = 1<<21,
 
        /* when adding here, make sure to change ieee80211_reconfig */
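
A sketch (hypothetical driver bss_info_changed() callback; foo_set_dtim is invented) of reacting to the renamed flag; per the @dtim_period comment below, the value is only valid once this flag has been signalled:

	static void foo_bss_info_changed(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif,
					 struct ieee80211_bss_conf *info,
					 u32 changed)
	{
		if (changed & BSS_CHANGED_BEACON_INFO)
			foo_set_dtim(hw->priv, info->dtim_period);
	}
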
@@ -288,7 +288,7 @@ enum ieee80211_rssi_event {
  *     IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE hardware flag
  * @dtim_period: num of beacons before the next DTIM, for beaconing,
  *     valid in station mode only if after the driver was notified
- *     with the %BSS_CHANGED_DTIM_PERIOD flag, will be non-zero then.
+ *     with the %BSS_CHANGED_BEACON_INFO flag, will be non-zero then.
  * @sync_tsf: last beacon's/probe response's TSF timestamp (could be old
  *     as it may have been received during scanning long ago). If the
  *     HW flag %IEEE80211_HW_TIMING_BEACON_ONLY is set, then this can
@@ -305,6 +305,7 @@ enum ieee80211_rssi_event {
  * @basic_rates: bitmap of basic rates, each bit stands for an
  *     index into the rate table configured by the driver in
  *     the current band.
+ * @beacon_rate: associated AP's beacon TX rate
  * @mcast_rate: per-band multicast rate index + 1 (0: disabled)
  * @bssid: The BSSID for this BSS
  * @enable_beacon: whether beaconing should be enabled or not
@@ -352,6 +353,7 @@ struct ieee80211_bss_conf {
        u32 sync_device_ts;
        u8 sync_dtim_count;
        u32 basic_rates;
+       struct ieee80211_rate *beacon_rate;
        int mcast_rate[IEEE80211_NUM_BANDS];
        u16 ht_operation_mode;
        s32 cqm_rssi_thold;
@@ -460,6 +462,8 @@ struct ieee80211_bss_conf {
  * @IEEE80211_TX_CTL_DONTFRAG: Don't fragment this packet even if it
  *     would be fragmented by size (this is optional, only used for
  *     monitor injection).
+ * @IEEE80211_TX_CTL_PS_RESPONSE: This frame is a response to a poll
+ *     frame (PS-Poll or uAPSD).
  *
  * Note: If you have to add new flags to the enumeration, then don't
  *      forget to update %IEEE80211_TX_TEMPORARY_FLAGS when necessary.
@@ -495,6 +499,7 @@ enum mac80211_tx_control_flags {
        IEEE80211_TX_STATUS_EOSP                = BIT(28),
        IEEE80211_TX_CTL_USE_MINRATE            = BIT(29),
        IEEE80211_TX_CTL_DONTFRAG               = BIT(30),
+       IEEE80211_TX_CTL_PS_RESPONSE            = BIT(31),
 };
 
 #define IEEE80211_TX_CTL_STBC_SHIFT            23
@@ -805,6 +810,7 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
  *     on this subframe
  * @RX_FLAG_AMPDU_DELIM_CRC_KNOWN: The delimiter CRC field is known (the CRC
  *     is stored in the @ampdu_delimiter_crc field)
+ * @RX_FLAG_STBC_MASK: STBC 2-bit bitmask. 1 - Nss=1, 2 - Nss=2, 3 - Nss=3
  */
 enum mac80211_rx_flags {
        RX_FLAG_MMIC_ERROR              = BIT(0),
@@ -832,8 +838,11 @@ enum mac80211_rx_flags {
        RX_FLAG_80MHZ                   = BIT(23),
        RX_FLAG_80P80MHZ                = BIT(24),
        RX_FLAG_160MHZ                  = BIT(25),
+       RX_FLAG_STBC_MASK               = BIT(26) | BIT(27),
 };
 
+#define RX_FLAG_STBC_SHIFT             26
+
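
A sketch (hypothetical driver RX completion; the descriptor field name is invented) of reporting STBC via the new mask and shift:

	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

	if (desc_stbc_streams)	/* 1..3 from the RX descriptor */
		status->flag |= desc_stbc_streams << RX_FLAG_STBC_SHIFT;
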
 /**
  * struct ieee80211_rx_status - receive status
  *
@@ -850,6 +859,10 @@ enum mac80211_rx_flags {
  * @signal: signal strength when receiving this frame, either in dBm, in dB or
  *     unspecified depending on the hardware capabilities flags
  *     @IEEE80211_HW_SIGNAL_*
+ * @chains: bitmask of receive chains for which separate signal strength
+ *     values were filled.
+ * @chain_signal: per-chain signal strength, in dBm (unlike @signal, doesn't
+ *     support dB or unspecified units)
  * @antenna: antenna used
  * @rate_idx: index of data rate into band's supported rates or MCS index if
  *     HT or VHT is used (%RX_FLAG_HT/%RX_FLAG_VHT)
@@ -881,6 +894,8 @@ struct ieee80211_rx_status {
        u8 band;
        u8 antenna;
        s8 signal;
+       u8 chains;
+       s8 chain_signal[IEEE80211_MAX_CHAINS];
        u8 ampdu_delimiter_crc;
        u8 vendor_radiotap_align;
        u8 vendor_radiotap_oui[3];
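
A sketch (hypothetical two-chain driver, RSSI variables invented) of how the new per-chain fields are filled alongside the existing @signal value:

	status->signal = max(rssi_chain0, rssi_chain1);
	status->chains = BIT(0) | BIT(1);
	status->chain_signal[0] = rssi_chain0;	/* dBm */
	status->chain_signal[1] = rssi_chain1;	/* dBm */
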
@@ -1235,7 +1250,7 @@ enum ieee80211_sta_rx_bandwidth {
  * struct ieee80211_sta_rates - station rate selection table
  *
  * @rcu_head: RCU head used for freeing the table on update
- * @rates: transmit rates/flags to be used by default.
+ * @rate: transmit rates/flags to be used by default.
  *     Overriding entries per-packet is possible by using cb tx control.
  */
 struct ieee80211_sta_rates {
@@ -1276,7 +1291,7 @@ struct ieee80211_sta_rates {
  *     notifications and capabilities. The value is only valid after
  *     the station moves to associated state.
  * @smps_mode: current SMPS mode (off, static or dynamic)
- * @tx_rates: rate control selection table
+ * @rates: rate control selection table
  */
 struct ieee80211_sta {
        u32 supp_rates[IEEE80211_NUM_BANDS];
index b87a1692b0864f436a9185447cbfe74cce6ec7e1..0af851c3b038186357b8576ec4025bb5193ee1fd 100644 (file)
@@ -59,8 +59,10 @@ struct nfc_hci_ops {
                              struct nfc_target *target);
        int (*event_received)(struct nfc_hci_dev *hdev, u8 gate, u8 event,
                              struct sk_buff *skb);
-       int (*enable_se)(struct nfc_dev *dev, u32 secure_element);
-       int (*disable_se)(struct nfc_dev *dev, u32 secure_element);
+       int (*fw_upload)(struct nfc_hci_dev *hdev, const char *firmware_name);
+       int (*discover_se)(struct nfc_hci_dev *dev);
+       int (*enable_se)(struct nfc_hci_dev *dev, u32 se_idx);
+       int (*disable_se)(struct nfc_hci_dev *dev, u32 se_idx);
 };
 
 /* Pipes */
@@ -152,7 +154,6 @@ struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops,
                                            struct nfc_hci_init_data *init_data,
                                            unsigned long quirks,
                                            u32 protocols,
-                                           u32 supported_se,
                                            const char *llc_name,
                                            int tx_headroom,
                                            int tx_tailroom,
index 5bc0c460edc0189b56633a111159a15f7a430131..99fc1f3a392af172b2c3d5b5b499898f5cfbae39 100644 (file)
@@ -3,6 +3,7 @@
  *  NFC Controller (NFCC) and a Device Host (DH).
  *
  *  Copyright (C) 2011 Texas Instruments, Inc.
+ *  Copyright (C) 2013 Intel Corporation. All rights reserved.
  *
  *  Written by Ilan Elias <ilane@ti.com>
  *
@@ -66,7 +67,7 @@ struct nci_dev;
 struct nci_ops {
        int (*open)(struct nci_dev *ndev);
        int (*close)(struct nci_dev *ndev);
-       int (*send)(struct sk_buff *skb);
+       int (*send)(struct nci_dev *ndev, struct sk_buff *skb);
 };
 
 #define NCI_MAX_SUPPORTED_RF_INTERFACES                4
@@ -147,13 +148,12 @@ struct nci_dev {
 /* ----- NCI Devices ----- */
 struct nci_dev *nci_allocate_device(struct nci_ops *ops,
                                    __u32 supported_protocols,
-                                   __u32 supported_se,
                                    int tx_headroom,
                                    int tx_tailroom);
 void nci_free_device(struct nci_dev *ndev);
 int nci_register_device(struct nci_dev *ndev);
 void nci_unregister_device(struct nci_dev *ndev);
-int nci_recv_frame(struct sk_buff *skb);
+int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb);
 
 static inline struct sk_buff *nci_skb_alloc(struct nci_dev *ndev,
                                            unsigned int len,
@@ -202,4 +202,56 @@ void nci_req_complete(struct nci_dev *ndev, int result);
 /* ----- NCI status code ----- */
 int nci_to_errno(__u8 code);
 
+/* ----- NCI over SPI acknowledge modes ----- */
+#define NCI_SPI_CRC_DISABLED   0x00
+#define NCI_SPI_CRC_ENABLED    0x01
+
+/* ----- NCI SPI structures ----- */
+struct nci_spi_dev;
+
+struct nci_spi_ops {
+       int (*open)(struct nci_spi_dev *ndev);
+       int (*close)(struct nci_spi_dev *ndev);
+       void (*assert_int)(struct nci_spi_dev *ndev);
+       void (*deassert_int)(struct nci_spi_dev *ndev);
+};
+
+struct nci_spi_dev {
+       struct nci_dev          *nci_dev;
+       struct spi_device       *spi;
+       struct nci_spi_ops      *ops;
+
+       unsigned int            xfer_udelay;    /* microseconds delay between
+                                                 transactions */
+       u8                      acknowledge_mode;
+
+       struct completion       req_completion;
+       u8                      req_result;
+
+       void                    *driver_data;
+};
+
+/* ----- NCI SPI Devices ----- */
+struct nci_spi_dev *nci_spi_allocate_device(struct spi_device *spi,
+                                               struct nci_spi_ops *ops,
+                                               u32 supported_protocols,
+                                               u32 supported_se,
+                                               u8 acknowledge_mode,
+                                               unsigned int delay);
+void nci_spi_free_device(struct nci_spi_dev *ndev);
+int nci_spi_register_device(struct nci_spi_dev *ndev);
+void nci_spi_unregister_device(struct nci_spi_dev *ndev);
+int nci_spi_recv_frame(struct nci_spi_dev *ndev);
+
+static inline void nci_spi_set_drvdata(struct nci_spi_dev *ndev,
+                                           void *data)
+{
+       ndev->driver_data = data;
+}
+
+static inline void *nci_spi_get_drvdata(struct nci_spi_dev *ndev)
+{
+       return ndev->driver_data;
+}
+
 #endif /* __NCI_CORE_H */
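
A minimal sketch (hypothetical SPI NFC driver probe, error handling abbreviated, names invented) of the new NCI-over-SPI registration API:

	static struct nci_spi_ops foo_spi_ops = {
		.open  = foo_spi_open,
		.close = foo_spi_close,
	};

	nsdev = nci_spi_allocate_device(spi, &foo_spi_ops,
					NFC_PROTO_ISO14443_MASK, 0 /* no SE */,
					NCI_SPI_CRC_ENABLED, 10 /* us */);
	if (!nsdev)
		return -ENOMEM;
	nci_spi_set_drvdata(nsdev, drv);
	return nci_spi_register_device(nsdev);
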
index 5eb80bb3cbb233f02401cec1f1ee8103119f3da0..0e353f1658bb46e638b287ee2bbfdbd33b37a584 100644 (file)
@@ -68,8 +68,12 @@ struct nfc_ops {
                             void *cb_context);
        int (*tm_send)(struct nfc_dev *dev, struct sk_buff *skb);
        int (*check_presence)(struct nfc_dev *dev, struct nfc_target *target);
-       int (*enable_se)(struct nfc_dev *dev, u32 secure_element);
-       int (*disable_se)(struct nfc_dev *dev, u32 secure_element);
+       int (*fw_upload)(struct nfc_dev *dev, const char *firmware_name);
+
+       /* Secure Element API */
+       int (*discover_se)(struct nfc_dev *dev);
+       int (*enable_se)(struct nfc_dev *dev, u32 se_idx);
+       int (*disable_se)(struct nfc_dev *dev, u32 se_idx);
 };
 
 #define NFC_TARGET_IDX_ANY -1
@@ -83,6 +87,8 @@ struct nfc_target {
        u8 sel_res;
        u8 nfcid1_len;
        u8 nfcid1[NFC_NFCID1_MAXSIZE];
+       u8 nfcid2_len;
+       u8 nfcid2[NFC_NFCID2_MAXSIZE];
        u8 sensb_res_len;
        u8 sensb_res[NFC_SENSB_RES_MAXSIZE];
        u8 sensf_res_len;
@@ -91,6 +97,23 @@ struct nfc_target {
        u8 logical_idx;
 };
 
+/**
+ * nfc_se - A structure for NFC accessible secure elements.
+ *
+ * @idx: The secure element index. User space will enable or
+ *       disable a secure element by its index.
+ * @type: The secure element type. It can be NFC_SE_UICC or
+ *        NFC_SE_EMBEDDED.
+ * @state: The secure element state, either enabled or disabled.
+ *
+ */
+struct nfc_se {
+       struct list_head list;
+       u32 idx;
+       u16 type;
+       u16 state;
+};
+
 struct nfc_genl_data {
        u32 poll_req_portid;
        struct mutex genl_data_mutex;
@@ -104,6 +127,7 @@ struct nfc_dev {
        int targets_generation;
        struct device dev;
        bool dev_up;
+       bool fw_upload_in_progress;
        u8 rf_mode;
        bool polling;
        struct nfc_target *active_target;
@@ -111,8 +135,7 @@ struct nfc_dev {
        struct nfc_genl_data genl_data;
        u32 supported_protocols;
 
-       u32 supported_se;
-       u32 active_se;
+       struct list_head secure_elements;
 
        int tx_headroom;
        int tx_tailroom;
@@ -132,7 +155,6 @@ extern struct class nfc_class;
 
 struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
                                    u32 supported_protocols,
-                                   u32 supported_se,
                                    int tx_headroom,
                                    int tx_tailroom);
 
@@ -216,4 +238,7 @@ int nfc_tm_data_received(struct nfc_dev *dev, struct sk_buff *skb);
 
 void nfc_driver_failure(struct nfc_dev *dev, int err);
 
+int nfc_add_se(struct nfc_dev *dev, u32 se_idx, u16 type);
+int nfc_remove_se(struct nfc_dev *dev, u32 se_idx);
+
 #endif /* __NET_NFC_H */
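
A sketch (hypothetical driver implementation of the new discover_se hook) showing how a secure element is announced to the core with nfc_add_se():

	static int foo_discover_se(struct nfc_dev *dev)
	{
		/* hardware reported a single UICC-based SE at index 1 */
		return nfc_add_se(dev, 1, NFC_SE_UICC);
	}
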
index ef937b56f9b54c44e4003d762eaa7105534077da..e2c1e66d58ae6c810e4ca18d4e62d9f78b2c3e4b 100644 (file)
@@ -118,7 +118,7 @@ struct ex_phy {
 
        enum ex_phy_state phy_state;
 
-       enum sas_dev_type attached_dev_type;
+       enum sas_device_type attached_dev_type;
        enum sas_linkrate linkrate;
 
        u8   attached_sata_host:1;
@@ -195,7 +195,7 @@ enum {
 
 struct domain_device {
        spinlock_t done_lock;
-        enum sas_dev_type dev_type;
+       enum sas_device_type dev_type;
 
         enum sas_linkrate linkrate;
         enum sas_linkrate min_linkrate;
index a6026da25f3e0bf5787d479e1511da85c1175299..25ac6283b9c753a6af74055d590a1ed613004be1 100644 (file)
@@ -107,7 +107,7 @@ enum osd_attributes_mode {
  *             int exponent: 04;
  *     }
  */
-typedef __be32 __bitwise osd_cdb_offset;
+typedef __be32 osd_cdb_offset;
 
 enum {
        OSD_OFFSET_UNUSED = 0xFFFFFFFF,
index be3eb0bf1ac0ef1213343a173787c645d16e21ef..0d2607d1238753f951e43e6b278735971413423c 100644 (file)
@@ -90,16 +90,18 @@ enum sas_oob_mode {
 };
 
 /* See sas_discover.c if you plan on changing these */
-enum sas_dev_type {
-       NO_DEVICE   = 0,          /* protocol */
-       SAS_END_DEV = 1,          /* protocol */
-       EDGE_DEV    = 2,          /* protocol */
-       FANOUT_DEV  = 3,          /* protocol */
-       SAS_HA      = 4,
-       SATA_DEV    = 5,
-       SATA_PM     = 7,
-       SATA_PM_PORT= 8,
-       SATA_PENDING  = 9,
+enum sas_device_type {
+       /* these are SAS protocol defined (attached device type field) */
+       SAS_PHY_UNUSED = 0,
+       SAS_END_DEVICE = 1,
+       SAS_EDGE_EXPANDER_DEVICE = 2,
+       SAS_FANOUT_EXPANDER_DEVICE = 3,
+       /* these are internal to libsas */
+       SAS_HA = 4,
+       SAS_SATA_DEV = 5,
+       SAS_SATA_PM = 7,
+       SAS_SATA_PM_PORT = 8,
+       SAS_SATA_PENDING = 9,
 };
 
 enum sas_protocol {
index ff71a56546845f2e29266ecf466ecd6d7174f38a..00f41aeeecf54f321b74599b99dc5f97a5af0bca 100644 (file)
@@ -32,8 +32,8 @@
 
 static inline int dev_is_sata(struct domain_device *dev)
 {
-       return dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM ||
-              dev->dev_type == SATA_PM_PORT || dev->dev_type == SATA_PENDING;
+       return dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM ||
+              dev->dev_type == SAS_SATA_PM_PORT || dev->dev_type == SAS_SATA_PENDING;
 }
 
 int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy);
index a7f9cba275e98405b7da60143361b87cc4fcc231..cc645876d14737db54ea12aeb9959b7a00dfe9c2 100644 (file)
@@ -394,10 +394,18 @@ extern int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
                        int data_direction, void *buffer, unsigned bufflen,
                        unsigned char *sense, int timeout, int retries,
                        int flag, int *resid);
-extern int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
-                           int data_direction, void *buffer, unsigned bufflen,
-                           struct scsi_sense_hdr *, int timeout, int retries,
-                           int *resid);
+extern int scsi_execute_req_flags(struct scsi_device *sdev,
+       const unsigned char *cmd, int data_direction, void *buffer,
+       unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
+       int retries, int *resid, int flags);
+static inline int scsi_execute_req(struct scsi_device *sdev,
+       const unsigned char *cmd, int data_direction, void *buffer,
+       unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
+       int retries, int *resid)
+{
+       return scsi_execute_req_flags(sdev, cmd, data_direction, buffer,
+               bufflen, sshdr, timeout, retries, resid, 0);
+}
 extern void sdev_disable_disk_events(struct scsi_device *sdev);
 extern void sdev_enable_disk_events(struct scsi_device *sdev);
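
A usage sketch (hypothetical caller; sdev and the flag value are shown for illustration only) of the new _flags variant. Existing scsi_execute_req() users are unchanged because the inline wrapper passes flags = 0:

	unsigned char cmd[6] = { TEST_UNIT_READY };
	struct scsi_sense_hdr sshdr;
	int ret;

	ret = scsi_execute_req_flags(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
				     30 * HZ, 3, NULL, REQ_PM);
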
 
index 4a58cca2ecc182f1d44912297cc294b3ecce4760..d0f1602985e7efe6d6ab40dd67fbcc04e6a32276 100644 (file)
@@ -471,14 +471,10 @@ iscsi_destroy_flashnode_sess(struct iscsi_bus_flash_session *fnode_sess);
 extern void iscsi_destroy_all_flashnode(struct Scsi_Host *shost);
 extern int iscsi_flashnode_bus_match(struct device *dev,
                                     struct device_driver *drv);
-extern int iscsi_is_flashnode_conn_dev(struct device *dev, void *data);
-
 extern struct device *
 iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data,
                          int (*fn)(struct device *dev, void *data));
-
 extern struct device *
-iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess,
-                         void *data,
-                         int (*fn)(struct device *dev, void *data));
+iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess);
+
 #endif
index 9b8e08879cfc9d7f7038245cf12e9b162f0f7f4e..0bd71e2702e3181e115ba8aa8e2b49a78ac21da7 100644 (file)
@@ -10,13 +10,6 @@ struct scsi_transport_template;
 struct sas_rphy;
 struct request;
 
-enum sas_device_type {
-       SAS_PHY_UNUSED = 0,
-       SAS_END_DEVICE = 1,
-       SAS_EDGE_EXPANDER_DEVICE = 2,
-       SAS_FANOUT_EXPANDER_DEVICE = 3,
-};
-
 static inline int sas_protocol_ata(enum sas_protocol proto)
 {
        return ((proto & SAS_PROTOCOL_SATA) ||
index 28c65e1ada21e8f632422055d70d66789d63a3d5..e11e179420a11fb07bce24e48e960bfdba9671a4 100644 (file)
 #define DECLARE_TLV_DB_LINEAR(name, min_dB, max_dB)    \
        unsigned int name[] = { TLV_DB_LINEAR_ITEM(min_dB, max_dB) }
 
-/* dB range container */
+/* dB range container:
+ * Items in a dB range container must be ordered by their values and by their
+ * dB values. This implies that larger values must correspond to larger
+ * dB values (which is also required for all other mixer controls).
+ */
 /* Each item is: <min> <max> <TLV> */
 #define TLV_DB_RANGE_ITEM(...) \
        TLV_ITEM(SNDRV_CTL_TLVT_DB_RANGE, __VA_ARGS__)
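
A sketch (hypothetical ASoC control, values invented) of a dB range container that follows the ordering rule described above, with both the value ranges and the dB ranges increasing together:

	static const unsigned int foo_vol_tlv[] = {
		TLV_DB_RANGE_HEAD(2),
		0,  7, TLV_DB_SCALE_ITEM(-9000, 600, 1),
		8, 15, TLV_DB_SCALE_ITEM(-3400, 200, 0),
	};
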
index 9f096f1c0907b7056bcbe396783fdb2237724a65..75cef3fd97add201693b6bf21f4e2f1754c96f43 100644 (file)
 #define AUDIT_OBJ_TYPE 21
 #define AUDIT_OBJ_LEV_LOW      22
 #define AUDIT_OBJ_LEV_HIGH     23
+#define AUDIT_LOGINUID_SET     24
 
                                /* These are ONLY useful when checking
                                 * at syscall exit time (AUDIT_AT_EXIT). */
@@ -369,7 +370,8 @@ struct audit_status {
 };
 
 struct audit_tty_status {
-       __u32           enabled; /* 1 = enabled, 0 = disabled */
+       __u32           enabled;        /* 1 = enabled, 0 = disabled */
+       __u32           log_passwd;     /* 1 = enabled, 0 = disabled */
 };
 
 /* audit_rule_data supports filter rules with both integer and string
index 7c6f627a717dc159bdf6f38170aa0908f717f65e..caed0f324d5ffc6fd88a59dda72f51636af0707a 100644 (file)
@@ -69,6 +69,8 @@
  *     starting a poll from a device which has a secure element enabled means
  *     we want to do SE based card emulation.
  * @NFC_CMD_DISABLE_SE: Disable the physical link to a specific secure element.
+ * @NFC_CMD_FW_UPLOAD: Request to load/flash firmware, or event to inform that
+ *     some firmware was loaded
  */
 enum nfc_commands {
        NFC_CMD_UNSPEC,
@@ -92,6 +94,9 @@ enum nfc_commands {
        NFC_CMD_DISABLE_SE,
        NFC_CMD_LLC_SDREQ,
        NFC_EVENT_LLC_SDRES,
+       NFC_CMD_FW_UPLOAD,
+       NFC_EVENT_SE_ADDED,
+       NFC_EVENT_SE_REMOVED,
 /* private: internal use only */
        __NFC_CMD_AFTER_LAST
 };
@@ -121,6 +126,9 @@ enum nfc_commands {
  * @NFC_ATTR_LLC_PARAM_RW: Receive Window size parameter
  * @NFC_ATTR_LLC_PARAM_MIUX: MIU eXtension parameter
  * @NFC_ATTR_SE: Available Secure Elements
+ * @NFC_ATTR_FIRMWARE_NAME: Free format firmware version
+ * @NFC_ATTR_SE_INDEX: Secure element index
+ * @NFC_ATTR_SE_TYPE: Secure element type (UICC or EMBEDDED)
  */
 enum nfc_attrs {
        NFC_ATTR_UNSPEC,
@@ -143,6 +151,9 @@ enum nfc_attrs {
        NFC_ATTR_LLC_PARAM_MIUX,
        NFC_ATTR_SE,
        NFC_ATTR_LLC_SDP,
+       NFC_ATTR_FIRMWARE_NAME,
+       NFC_ATTR_SE_INDEX,
+       NFC_ATTR_SE_TYPE,
 /* private: internal use only */
        __NFC_ATTR_AFTER_LAST
 };
@@ -159,9 +170,12 @@ enum nfc_sdp_attr {
 
 #define NFC_DEVICE_NAME_MAXSIZE 8
 #define NFC_NFCID1_MAXSIZE 10
+#define NFC_NFCID2_MAXSIZE 8
+#define NFC_NFCID3_MAXSIZE 10
 #define NFC_SENSB_RES_MAXSIZE 12
 #define NFC_SENSF_RES_MAXSIZE 18
 #define NFC_GB_MAXSIZE        48
+#define NFC_FIRMWARE_NAME_MAXSIZE 32
 
 /* NFC protocols */
 #define NFC_PROTO_JEWEL                1
@@ -191,10 +205,12 @@ enum nfc_sdp_attr {
 #define NFC_PROTO_ISO14443_B_MASK (1 << NFC_PROTO_ISO14443_B)
 
 /* NFC Secure Elements */
-#define NFC_SE_NONE     0x0
 #define NFC_SE_UICC     0x1
 #define NFC_SE_EMBEDDED 0x2
 
+#define NFC_SE_DISABLED 0x0
+#define NFC_SE_ENABLED  0x1
+
 struct sockaddr_nfc {
        sa_family_t sa_family;
        __u32 dev_idx;
index d1e48b5e348fa829d5b05ca9d1886bf2c077c4d6..861e5eba3953b613956a6ca24f632c13d2a1e937 100644 (file)
@@ -27,6 +27,8 @@
 
 #include <linux/types.h>
 
+#define NL80211_GENL_NAME "nl80211"
+
 /**
  * DOC: Station handling
  *
@@ -1429,6 +1431,11 @@ enum nl80211_commands {
  * @NL80211_ATTR_MAX_CRIT_PROT_DURATION: duration in milliseconds in which
  *      the connection should have increased reliability (u16).
  *
+ * @NL80211_ATTR_PEER_AID: Association ID for the peer TDLS station (u16).
+ *     This is similar to @NL80211_ATTR_STA_AID, but it may also be used with
+ *     the first @NL80211_CMD_SET_STATION command to update a TDLS peer STA
+ *     entry.
+ *
  * @NL80211_ATTR_MAX: highest attribute number currently defined
  * @__NL80211_ATTR_AFTER_LAST: internal use
  */
@@ -1727,6 +1734,8 @@ enum nl80211_attrs {
        NL80211_ATTR_CRIT_PROT_ID,
        NL80211_ATTR_MAX_CRIT_PROT_DURATION,
 
+       NL80211_ATTR_PEER_AID,
+
        /* add attributes here, update the policy in nl80211.c */
 
        __NL80211_ATTR_AFTER_LAST,
@@ -1991,6 +2000,10 @@ enum nl80211_sta_bss_param {
  * @NL80211_STA_INFO_PEER_PM: peer mesh STA link-specific power mode
  * @NL80211_STA_INFO_NONPEER_PM: neighbor mesh STA power save mode towards
  *     non-peer STA
+ * @NL80211_STA_INFO_CHAIN_SIGNAL: per-chain signal strength of the last PPDU.
+ *     Contains a nested array of signal strength attributes (u8, dBm).
+ * @NL80211_STA_INFO_CHAIN_SIGNAL_AVG: per-chain signal strength average
+ *     Same format as NL80211_STA_INFO_CHAIN_SIGNAL.
  * @__NL80211_STA_INFO_AFTER_LAST: internal
  * @NL80211_STA_INFO_MAX: highest possible station info attribute
  */
@@ -2020,6 +2033,8 @@ enum nl80211_sta_info {
        NL80211_STA_INFO_NONPEER_PM,
        NL80211_STA_INFO_RX_BYTES64,
        NL80211_STA_INFO_TX_BYTES64,
+       NL80211_STA_INFO_CHAIN_SIGNAL,
+       NL80211_STA_INFO_CHAIN_SIGNAL_AVG,
 
        /* keep last */
        __NL80211_STA_INFO_AFTER_LAST,
@@ -2413,6 +2428,8 @@ enum nl80211_survey_info {
  * @NL80211_MNTR_FLAG_OTHER_BSS: disable BSSID filtering
  * @NL80211_MNTR_FLAG_COOK_FRAMES: report frames after processing.
  *     overrides all other flags.
+ * @NL80211_MNTR_FLAG_ACTIVE: use the configured MAC address
+ *     and ACK incoming unicast packets.
  *
  * @__NL80211_MNTR_FLAG_AFTER_LAST: internal use
  * @NL80211_MNTR_FLAG_MAX: highest possible monitor flag
@@ -2424,6 +2441,7 @@ enum nl80211_mntr_flags {
        NL80211_MNTR_FLAG_CONTROL,
        NL80211_MNTR_FLAG_OTHER_BSS,
        NL80211_MNTR_FLAG_COOK_FRAMES,
+       NL80211_MNTR_FLAG_ACTIVE,
 
        /* keep last */
        __NL80211_MNTR_FLAG_AFTER_LAST,
@@ -2559,6 +2577,10 @@ enum nl80211_mesh_power_mode {
  *
  * @NL80211_MESHCONF_AWAKE_WINDOW: awake window duration (in TUs)
  *
+ * @NL80211_MESHCONF_PLINK_TIMEOUT: If no tx activity is seen from a STA we've
+ *     established peering with for longer than this time (in seconds), then
+ *     remove it from the STA's list of peers.  Default is 30 minutes.
+ *
  * @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use
  */
 enum nl80211_meshconf_params {
@@ -2590,6 +2612,7 @@ enum nl80211_meshconf_params {
        NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL,
        NL80211_MESHCONF_POWER_MODE,
        NL80211_MESHCONF_AWAKE_WINDOW,
+       NL80211_MESHCONF_PLINK_TIMEOUT,
 
        /* keep last */
        __NL80211_MESHCONF_ATTR_AFTER_LAST,
@@ -2637,6 +2660,10 @@ enum nl80211_meshconf_params {
  * @NL80211_MESH_SETUP_USERSPACE_MPM: Enable this option if userspace will
  *     implement an MPM which handles peer allocation and state.
  *
+ * @NL80211_MESH_SETUP_AUTH_PROTOCOL: Inform the kernel of the authentication
+ *     method (u8, as defined in IEEE 802.11 8.4.2.100.6, e.g. 0x1 for SAE).
+ *     Default is no authentication method required.
+ *
  * @NL80211_MESH_SETUP_ATTR_MAX: highest possible mesh setup attribute number
  *
  * @__NL80211_MESH_SETUP_ATTR_AFTER_LAST: Internal use
@@ -2650,6 +2677,7 @@ enum nl80211_mesh_setup_params {
        NL80211_MESH_SETUP_USERSPACE_AMPE,
        NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC,
        NL80211_MESH_SETUP_USERSPACE_MPM,
+       NL80211_MESH_SETUP_AUTH_PROTOCOL,
 
        /* keep last */
        __NL80211_MESH_SETUP_ATTR_AFTER_LAST,
@@ -2730,6 +2758,8 @@ enum nl80211_channel_type {
  *     and %NL80211_ATTR_CENTER_FREQ2 attributes must be provided as well
  * @NL80211_CHAN_WIDTH_160: 160 MHz channel, the %NL80211_ATTR_CENTER_FREQ1
  *     attribute must be provided as well
+ * @NL80211_CHAN_WIDTH_5: 5 MHz OFDM channel
+ * @NL80211_CHAN_WIDTH_10: 10 MHz OFDM channel
  */
 enum nl80211_chan_width {
        NL80211_CHAN_WIDTH_20_NOHT,
@@ -2738,6 +2768,8 @@ enum nl80211_chan_width {
        NL80211_CHAN_WIDTH_80,
        NL80211_CHAN_WIDTH_80P80,
        NL80211_CHAN_WIDTH_160,
+       NL80211_CHAN_WIDTH_5,
+       NL80211_CHAN_WIDTH_10,
 };
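
A sketch (hypothetical driver/validation code) of describing a 5 MHz channel with the new width value; for 5 and 10 MHz channels the center frequency equals the channel's own center:

	struct cfg80211_chan_def chandef = {
		.chan = chan,	/* struct ieee80211_channel * */
		.width = NL80211_CHAN_WIDTH_5,
		.center_freq1 = chan->center_freq,
	};

	WARN_ON(!cfg80211_chandef_valid(&chandef));
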
 
 /**
@@ -3556,6 +3588,10 @@ enum nl80211_ap_sme_features {
  *     Peering Management entity which may be implemented by registering for
  *     beacons or NL80211_CMD_NEW_PEER_CANDIDATE events. The mesh beacon is
  *     still generated by the driver.
+ * @NL80211_FEATURE_ACTIVE_MONITOR: This driver supports an active monitor
+ *     interface. An active monitor interface behaves like a normal monitor
+ *     interface, but gets added to the driver. It ensures that incoming
+ *     unicast packets directed at the configured interface address get ACKed.
  */
 enum nl80211_feature_flags {
        NL80211_FEATURE_SK_TX_STATUS                    = 1 << 0,
@@ -3575,6 +3611,7 @@ enum nl80211_feature_flags {
        NL80211_FEATURE_ADVERTISE_CHAN_LIMITS           = 1 << 14,
        NL80211_FEATURE_FULL_AP_CLIENT_STATE            = 1 << 15,
        NL80211_FEATURE_USERSPACE_MPM                   = 1 << 16,
+       NL80211_FEATURE_ACTIVE_MONITOR                  = 1 << 17,
 };
 
 /**
index 0b084fa44b1f21f3cc73508676e79d58b08f1896..21c7fa615bd3107b0c28a4da499ea3ee7361d695 100644 (file)
@@ -49,6 +49,8 @@
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/kthread.h>
+#include <linux/kernel.h>
+#include <linux/syscalls.h>
 
 #include <linux/audit.h>
 
@@ -265,7 +267,6 @@ void audit_log_lost(const char *message)
 }
 
 static int audit_log_config_change(char *function_name, int new, int old,
-                                  kuid_t loginuid, u32 sessionid, u32 sid,
                                   int allow_changes)
 {
        struct audit_buffer *ab;
@@ -274,29 +275,17 @@ static int audit_log_config_change(char *function_name, int new, int old,
        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
        if (unlikely(!ab))
                return rc;
-       audit_log_format(ab, "%s=%d old=%d auid=%u ses=%u", function_name, new,
-                        old, from_kuid(&init_user_ns, loginuid), sessionid);
-       if (sid) {
-               char *ctx = NULL;
-               u32 len;
-
-               rc = security_secid_to_secctx(sid, &ctx, &len);
-               if (rc) {
-                       audit_log_format(ab, " sid=%u", sid);
-                       allow_changes = 0; /* Something weird, deny request */
-               } else {
-                       audit_log_format(ab, " subj=%s", ctx);
-                       security_release_secctx(ctx, len);
-               }
-       }
+       audit_log_format(ab, "%s=%d old=%d", function_name, new, old);
+       audit_log_session_info(ab);
+       rc = audit_log_task_context(ab);
+       if (rc)
+               allow_changes = 0; /* Something weird, deny request */
        audit_log_format(ab, " res=%d", allow_changes);
        audit_log_end(ab);
        return rc;
 }
 
-static int audit_do_config_change(char *function_name, int *to_change,
-                                 int new, kuid_t loginuid, u32 sessionid,
-                                 u32 sid)
+static int audit_do_config_change(char *function_name, int *to_change, int new)
 {
        int allow_changes, rc = 0, old = *to_change;
 
@@ -307,8 +296,7 @@ static int audit_do_config_change(char *function_name, int *to_change,
                allow_changes = 1;
 
        if (audit_enabled != AUDIT_OFF) {
-               rc = audit_log_config_change(function_name, new, old, loginuid,
-                                            sessionid, sid, allow_changes);
+               rc = audit_log_config_change(function_name, new, old, allow_changes);
                if (rc)
                        allow_changes = 0;
        }
@@ -322,44 +310,37 @@ static int audit_do_config_change(char *function_name, int *to_change,
        return rc;
 }
 
-static int audit_set_rate_limit(int limit, kuid_t loginuid, u32 sessionid,
-                               u32 sid)
+static int audit_set_rate_limit(int limit)
 {
-       return audit_do_config_change("audit_rate_limit", &audit_rate_limit,
-                                     limit, loginuid, sessionid, sid);
+       return audit_do_config_change("audit_rate_limit", &audit_rate_limit, limit);
 }
 
-static int audit_set_backlog_limit(int limit, kuid_t loginuid, u32 sessionid,
-                                  u32 sid)
+static int audit_set_backlog_limit(int limit)
 {
-       return audit_do_config_change("audit_backlog_limit", &audit_backlog_limit,
-                                     limit, loginuid, sessionid, sid);
+       return audit_do_config_change("audit_backlog_limit", &audit_backlog_limit, limit);
 }
 
-static int audit_set_enabled(int state, kuid_t loginuid, u32 sessionid, u32 sid)
+static int audit_set_enabled(int state)
 {
        int rc;
        if (state < AUDIT_OFF || state > AUDIT_LOCKED)
                return -EINVAL;
 
-       rc =  audit_do_config_change("audit_enabled", &audit_enabled, state,
-                                    loginuid, sessionid, sid);
-
+       rc =  audit_do_config_change("audit_enabled", &audit_enabled, state);
        if (!rc)
                audit_ever_enabled |= !!state;
 
        return rc;
 }
 
-static int audit_set_failure(int state, kuid_t loginuid, u32 sessionid, u32 sid)
+static int audit_set_failure(int state)
 {
        if (state != AUDIT_FAIL_SILENT
            && state != AUDIT_FAIL_PRINTK
            && state != AUDIT_FAIL_PANIC)
                return -EINVAL;
 
-       return audit_do_config_change("audit_failure", &audit_failure, state,
-                                     loginuid, sessionid, sid);
+       return audit_do_config_change("audit_failure", &audit_failure, state);
 }
 
 /*
@@ -417,34 +398,53 @@ static void kauditd_send_skb(struct sk_buff *skb)
                consume_skb(skb);
 }
 
-static int kauditd_thread(void *dummy)
+/*
+ * flush_hold_queue - empty the hold queue if auditd appears
+ *
+ * If auditd just started, drain the queue of messages already
+ * sent to syslog/printk.  Remember loss here is ok.  We already
+ * called audit_log_lost() if it didn't go out normally.  So the
+ * race between the skb_dequeue and the next check for audit_pid
+ * doesn't matter.
+ *
+ * If you ever find kauditd to be too slow we can get a perf win
+ * by doing our own locking and keeping better track if there
+ * are messages in this queue.  I don't see the need now, but
+ * in 5 years when I want to play with this again I'll see this
+ * note and still have no friggin idea what i'm thinking today.
+ */
+static void flush_hold_queue(void)
 {
        struct sk_buff *skb;
 
+       if (!audit_default || !audit_pid)
+               return;
+
+       skb = skb_dequeue(&audit_skb_hold_queue);
+       if (likely(!skb))
+               return;
+
+       while (skb && audit_pid) {
+               kauditd_send_skb(skb);
+               skb = skb_dequeue(&audit_skb_hold_queue);
+       }
+
+       /*
+        * if auditd just disappeared but we
+        * dequeued an skb we need to drop ref
+        */
+       if (skb)
+               consume_skb(skb);
+}
+
+static int kauditd_thread(void *dummy)
+{
        set_freezable();
        while (!kthread_should_stop()) {
-               /*
-                * if auditd just started drain the queue of messages already
-                * sent to syslog/printk.  remember loss here is ok.  we already
-                * called audit_log_lost() if it didn't go out normally.  so the
-                * race between the skb_dequeue and the next check for audit_pid
-                * doesn't matter.
-                *
-                * if you ever find kauditd to be too slow we can get a perf win
-                * by doing our own locking and keeping better track if there
-                * are messages in this queue.  I don't see the need now, but
-                * in 5 years when I want to play with this again I'll see this
-                * note and still have no friggin idea what i'm thinking today.
-                */
-               if (audit_default && audit_pid) {
-                       skb = skb_dequeue(&audit_skb_hold_queue);
-                       if (unlikely(skb)) {
-                               while (skb && audit_pid) {
-                                       kauditd_send_skb(skb);
-                                       skb = skb_dequeue(&audit_skb_hold_queue);
-                               }
-                       }
-               }
+               struct sk_buff *skb;
+               DECLARE_WAITQUEUE(wait, current);
+
+               flush_hold_queue();
 
                skb = skb_dequeue(&audit_skb_queue);
                wake_up(&audit_backlog_wait);
@@ -453,19 +453,18 @@ static int kauditd_thread(void *dummy)
                                kauditd_send_skb(skb);
                        else
                                audit_printk_skb(skb);
-               } else {
-                       DECLARE_WAITQUEUE(wait, current);
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       add_wait_queue(&kauditd_wait, &wait);
-
-                       if (!skb_queue_len(&audit_skb_queue)) {
-                               try_to_freeze();
-                               schedule();
-                       }
+                       continue;
+               }
+               set_current_state(TASK_INTERRUPTIBLE);
+               add_wait_queue(&kauditd_wait, &wait);
 
-                       __set_current_state(TASK_RUNNING);
-                       remove_wait_queue(&kauditd_wait, &wait);
+               if (!skb_queue_len(&audit_skb_queue)) {
+                       try_to_freeze();
+                       schedule();
                }
+
+               __set_current_state(TASK_RUNNING);
+               remove_wait_queue(&kauditd_wait, &wait);
        }
        return 0;
 }
@@ -579,13 +578,14 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
                return -EPERM;
 
        switch (msg_type) {
-       case AUDIT_GET:
        case AUDIT_LIST:
-       case AUDIT_LIST_RULES:
-       case AUDIT_SET:
        case AUDIT_ADD:
-       case AUDIT_ADD_RULE:
        case AUDIT_DEL:
+               return -EOPNOTSUPP;
+       case AUDIT_GET:
+       case AUDIT_SET:
+       case AUDIT_LIST_RULES:
+       case AUDIT_ADD_RULE:
        case AUDIT_DEL_RULE:
        case AUDIT_SIGNAL_INFO:
        case AUDIT_TTY_GET:
@@ -608,12 +608,10 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
        return err;
 }
 
-static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type,
-                                    kuid_t auid, u32 ses, u32 sid)
+static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type)
 {
        int rc = 0;
-       char *ctx = NULL;
-       u32 len;
+       uid_t uid = from_kuid(&init_user_ns, current_uid());
 
        if (!audit_enabled) {
                *ab = NULL;
@@ -623,33 +621,21 @@ static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type,
        *ab = audit_log_start(NULL, GFP_KERNEL, msg_type);
        if (unlikely(!*ab))
                return rc;
-       audit_log_format(*ab, "pid=%d uid=%u auid=%u ses=%u",
-                        task_tgid_vnr(current),
-                        from_kuid(&init_user_ns, current_uid()),
-                        from_kuid(&init_user_ns, auid), ses);
-       if (sid) {
-               rc = security_secid_to_secctx(sid, &ctx, &len);
-               if (rc)
-                       audit_log_format(*ab, " ssid=%u", sid);
-               else {
-                       audit_log_format(*ab, " subj=%s", ctx);
-                       security_release_secctx(ctx, len);
-               }
-       }
+       audit_log_format(*ab, "pid=%d uid=%u", task_tgid_vnr(current), uid);
+       audit_log_session_info(*ab);
+       audit_log_task_context(*ab);
 
        return rc;
 }
 
 static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
-       u32                     seq, sid;
+       u32                     seq;
        void                    *data;
        struct audit_status     *status_get, status_set;
        int                     err;
        struct audit_buffer     *ab;
        u16                     msg_type = nlh->nlmsg_type;
-       kuid_t                  loginuid; /* loginuid of sender */
-       u32                     sessionid;
        struct audit_sig_info   *sig_data;
        char                    *ctx = NULL;
        u32                     len;
@@ -668,9 +654,6 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                        return err;
                }
        }
-       loginuid = audit_get_loginuid(current);
-       sessionid = audit_get_sessionid(current);
-       security_task_getsecid(current, &sid);
        seq  = nlh->nlmsg_seq;
        data = nlmsg_data(nlh);
 
@@ -691,14 +674,12 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                        return -EINVAL;
                status_get   = (struct audit_status *)data;
                if (status_get->mask & AUDIT_STATUS_ENABLED) {
-                       err = audit_set_enabled(status_get->enabled,
-                                               loginuid, sessionid, sid);
+                       err = audit_set_enabled(status_get->enabled);
                        if (err < 0)
                                return err;
                }
                if (status_get->mask & AUDIT_STATUS_FAILURE) {
-                       err = audit_set_failure(status_get->failure,
-                                               loginuid, sessionid, sid);
+                       err = audit_set_failure(status_get->failure);
                        if (err < 0)
                                return err;
                }
@@ -706,22 +687,17 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                        int new_pid = status_get->pid;
 
                        if (audit_enabled != AUDIT_OFF)
-                               audit_log_config_change("audit_pid", new_pid,
-                                                       audit_pid, loginuid,
-                                                       sessionid, sid, 1);
-
+                               audit_log_config_change("audit_pid", new_pid, audit_pid, 1);
                        audit_pid = new_pid;
                        audit_nlk_portid = NETLINK_CB(skb).portid;
                }
                if (status_get->mask & AUDIT_STATUS_RATE_LIMIT) {
-                       err = audit_set_rate_limit(status_get->rate_limit,
-                                                  loginuid, sessionid, sid);
+                       err = audit_set_rate_limit(status_get->rate_limit);
                        if (err < 0)
                                return err;
                }
                if (status_get->mask & AUDIT_STATUS_BACKLOG_LIMIT)
-                       err = audit_set_backlog_limit(status_get->backlog_limit,
-                                                     loginuid, sessionid, sid);
+                       err = audit_set_backlog_limit(status_get->backlog_limit);
                break;
        case AUDIT_USER:
        case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
@@ -729,25 +705,22 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                if (!audit_enabled && msg_type != AUDIT_USER_AVC)
                        return 0;
 
-               err = audit_filter_user();
+               err = audit_filter_user(msg_type);
                if (err == 1) {
                        err = 0;
                        if (msg_type == AUDIT_USER_TTY) {
-                               err = tty_audit_push_task(current, loginuid,
-                                                            sessionid);
+                               err = tty_audit_push_current();
                                if (err)
                                        break;
                        }
-                       audit_log_common_recv_msg(&ab, msg_type,
-                                                 loginuid, sessionid, sid);
-
+                       audit_log_common_recv_msg(&ab, msg_type);
                        if (msg_type != AUDIT_USER_TTY)
                                audit_log_format(ab, " msg='%.1024s'",
                                                 (char *)data);
                        else {
                                int size;
 
-                               audit_log_format(ab, " msg=");
+                               audit_log_format(ab, " data=");
                                size = nlmsg_len(nlh);
                                if (size > 0 &&
                                    ((unsigned char *)data)[size - 1] == '\0')
@@ -758,50 +731,24 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                        audit_log_end(ab);
                }
                break;
-       case AUDIT_ADD:
-       case AUDIT_DEL:
-               if (nlmsg_len(nlh) < sizeof(struct audit_rule))
-                       return -EINVAL;
-               if (audit_enabled == AUDIT_LOCKED) {
-                       audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE,
-                                                 loginuid, sessionid, sid);
-
-                       audit_log_format(ab, " audit_enabled=%d res=0",
-                                        audit_enabled);
-                       audit_log_end(ab);
-                       return -EPERM;
-               }
-               /* fallthrough */
-       case AUDIT_LIST:
-               err = audit_receive_filter(msg_type, NETLINK_CB(skb).portid,
-                                          seq, data, nlmsg_len(nlh),
-                                          loginuid, sessionid, sid);
-               break;
        case AUDIT_ADD_RULE:
        case AUDIT_DEL_RULE:
                if (nlmsg_len(nlh) < sizeof(struct audit_rule_data))
                        return -EINVAL;
                if (audit_enabled == AUDIT_LOCKED) {
-                       audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE,
-                                                 loginuid, sessionid, sid);
-
-                       audit_log_format(ab, " audit_enabled=%d res=0",
-                                        audit_enabled);
+                       audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE);
+                       audit_log_format(ab, " audit_enabled=%d res=0", audit_enabled);
                        audit_log_end(ab);
                        return -EPERM;
                }
                /* fallthrough */
        case AUDIT_LIST_RULES:
                err = audit_receive_filter(msg_type, NETLINK_CB(skb).portid,
-                                          seq, data, nlmsg_len(nlh),
-                                          loginuid, sessionid, sid);
+                                          seq, data, nlmsg_len(nlh));
                break;
        case AUDIT_TRIM:
                audit_trim_trees();
-
-               audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE,
-                                         loginuid, sessionid, sid);
-
+               audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE);
                audit_log_format(ab, " op=trim res=1");
                audit_log_end(ab);
                break;
@@ -831,8 +778,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                /* OK, here comes... */
                err = audit_tag_tree(old, new);
 
-               audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE,
-                                         loginuid, sessionid, sid);
+               audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE);
 
                audit_log_format(ab, " op=make_equiv old=");
                audit_log_untrustedstring(ab, old);
@@ -871,27 +817,30 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                struct audit_tty_status s;
                struct task_struct *tsk = current;
 
-               spin_lock_irq(&tsk->sighand->siglock);
+               spin_lock(&tsk->sighand->siglock);
                s.enabled = tsk->signal->audit_tty != 0;
-               spin_unlock_irq(&tsk->sighand->siglock);
+               s.log_passwd = tsk->signal->audit_tty_log_passwd;
+               spin_unlock(&tsk->sighand->siglock);
 
                audit_send_reply(NETLINK_CB(skb).portid, seq,
                                 AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
                break;
        }
        case AUDIT_TTY_SET: {
-               struct audit_tty_status *s;
+               struct audit_tty_status s;
                struct task_struct *tsk = current;
 
-               if (nlh->nlmsg_len < sizeof(struct audit_tty_status))
-                       return -EINVAL;
-               s = data;
-               if (s->enabled != 0 && s->enabled != 1)
+               memset(&s, 0, sizeof(s));
+               /* guard against past and future API changes */
+               memcpy(&s, data, min(sizeof(s), (size_t)nlh->nlmsg_len));
+               if ((s.enabled != 0 && s.enabled != 1) ||
+                   (s.log_passwd != 0 && s.log_passwd != 1))
                        return -EINVAL;
 
-               spin_lock_irq(&tsk->sighand->siglock);
-               tsk->signal->audit_tty = s->enabled != 0;
-               spin_unlock_irq(&tsk->sighand->siglock);
+               spin_lock(&tsk->sighand->siglock);
+               tsk->signal->audit_tty = s.enabled;
+               tsk->signal->audit_tty_log_passwd = s.log_passwd;
+               spin_unlock(&tsk->sighand->siglock);
                break;
        }
        default:
@@ -1434,6 +1383,14 @@ void audit_log_d_path(struct audit_buffer *ab, const char *prefix,
        kfree(pathname);
 }
 
+void audit_log_session_info(struct audit_buffer *ab)
+{
+       u32 sessionid = audit_get_sessionid(current);
+       uid_t auid = from_kuid(&init_user_ns, audit_get_loginuid(current));
+
+       audit_log_format(ab, " auid=%u ses=%u\n", auid, sessionid);
+}
+
 void audit_log_key(struct audit_buffer *ab, char *key)
 {
        audit_log_format(ab, " key=");
@@ -1443,6 +1400,224 @@ void audit_log_key(struct audit_buffer *ab, char *key)
                audit_log_format(ab, "(null)");
 }
 
+void audit_log_cap(struct audit_buffer *ab, char *prefix, kernel_cap_t *cap)
+{
+       int i;
+
+       audit_log_format(ab, " %s=", prefix);
+       CAP_FOR_EACH_U32(i) {
+               audit_log_format(ab, "%08x",
+                                cap->cap[(_KERNEL_CAPABILITY_U32S-1) - i]);
+       }
+}
+
+void audit_log_fcaps(struct audit_buffer *ab, struct audit_names *name)
+{
+       kernel_cap_t *perm = &name->fcap.permitted;
+       kernel_cap_t *inh = &name->fcap.inheritable;
+       int log = 0;
+
+       if (!cap_isclear(*perm)) {
+               audit_log_cap(ab, "cap_fp", perm);
+               log = 1;
+       }
+       if (!cap_isclear(*inh)) {
+               audit_log_cap(ab, "cap_fi", inh);
+               log = 1;
+       }
+
+       if (log)
+               audit_log_format(ab, " cap_fe=%d cap_fver=%x",
+                                name->fcap.fE, name->fcap_ver);
+}
+
+static inline int audit_copy_fcaps(struct audit_names *name,
+                                  const struct dentry *dentry)
+{
+       struct cpu_vfs_cap_data caps;
+       int rc;
+
+       if (!dentry)
+               return 0;
+
+       rc = get_vfs_caps_from_disk(dentry, &caps);
+       if (rc)
+               return rc;
+
+       name->fcap.permitted = caps.permitted;
+       name->fcap.inheritable = caps.inheritable;
+       name->fcap.fE = !!(caps.magic_etc & VFS_CAP_FLAGS_EFFECTIVE);
+       name->fcap_ver = (caps.magic_etc & VFS_CAP_REVISION_MASK) >>
+                               VFS_CAP_REVISION_SHIFT;
+
+       return 0;
+}
+
+/* Copy inode data into an audit_names. */
+void audit_copy_inode(struct audit_names *name, const struct dentry *dentry,
+                     const struct inode *inode)
+{
+       name->ino   = inode->i_ino;
+       name->dev   = inode->i_sb->s_dev;
+       name->mode  = inode->i_mode;
+       name->uid   = inode->i_uid;
+       name->gid   = inode->i_gid;
+       name->rdev  = inode->i_rdev;
+       security_inode_getsecid(inode, &name->osid);
+       audit_copy_fcaps(name, dentry);
+}
+
+/**
+ * audit_log_name - produce AUDIT_PATH record from struct audit_names
+ * @context: audit_context for the task
+ * @n: audit_names structure with reportable details
+ * @path: optional path to report instead of audit_names->name
+ * @record_num: record number to report when handling a list of names
+ * @call_panic: optional pointer to int that will be updated if secid fails
+ */
+void audit_log_name(struct audit_context *context, struct audit_names *n,
+                   struct path *path, int record_num, int *call_panic)
+{
+       struct audit_buffer *ab;
+       ab = audit_log_start(context, GFP_KERNEL, AUDIT_PATH);
+       if (!ab)
+               return;
+
+       audit_log_format(ab, "item=%d", record_num);
+
+       if (path)
+               audit_log_d_path(ab, " name=", path);
+       else if (n->name) {
+               switch (n->name_len) {
+               case AUDIT_NAME_FULL:
+                       /* log the full path */
+                       audit_log_format(ab, " name=");
+                       audit_log_untrustedstring(ab, n->name->name);
+                       break;
+               case 0:
+                       /* name was specified as a relative path and the
+                        * directory component is the cwd */
+                       audit_log_d_path(ab, " name=", &context->pwd);
+                       break;
+               default:
+                       /* log the name's directory component */
+                       audit_log_format(ab, " name=");
+                       audit_log_n_untrustedstring(ab, n->name->name,
+                                                   n->name_len);
+               }
+       } else
+               audit_log_format(ab, " name=(null)");
+
+       if (n->ino != (unsigned long)-1) {
+               audit_log_format(ab, " inode=%lu"
+                                " dev=%02x:%02x mode=%#ho"
+                                " ouid=%u ogid=%u rdev=%02x:%02x",
+                                n->ino,
+                                MAJOR(n->dev),
+                                MINOR(n->dev),
+                                n->mode,
+                                from_kuid(&init_user_ns, n->uid),
+                                from_kgid(&init_user_ns, n->gid),
+                                MAJOR(n->rdev),
+                                MINOR(n->rdev));
+       }
+       if (n->osid != 0) {
+               char *ctx = NULL;
+               u32 len;
+               if (security_secid_to_secctx(
+                       n->osid, &ctx, &len)) {
+                       audit_log_format(ab, " osid=%u", n->osid);
+                       if (call_panic)
+                               *call_panic = 2;
+               } else {
+                       audit_log_format(ab, " obj=%s", ctx);
+                       security_release_secctx(ctx, len);
+               }
+       }
+
+       audit_log_fcaps(ab, n);
+       audit_log_end(ab);
+}
+
+int audit_log_task_context(struct audit_buffer *ab)
+{
+       char *ctx = NULL;
+       unsigned len;
+       int error;
+       u32 sid;
+
+       security_task_getsecid(current, &sid);
+       if (!sid)
+               return 0;
+
+       error = security_secid_to_secctx(sid, &ctx, &len);
+       if (error) {
+               if (error != -EINVAL)
+                       goto error_path;
+               return 0;
+       }
+
+       audit_log_format(ab, " subj=%s", ctx);
+       security_release_secctx(ctx, len);
+       return 0;
+
+error_path:
+       audit_panic("error in audit_log_task_context");
+       return error;
+}
+EXPORT_SYMBOL(audit_log_task_context);
+
+void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk)
+{
+       const struct cred *cred;
+       char name[sizeof(tsk->comm)];
+       struct mm_struct *mm = tsk->mm;
+       char *tty;
+
+       if (!ab)
+               return;
+
+       /* tsk == current */
+       cred = current_cred();
+
+       spin_lock_irq(&tsk->sighand->siglock);
+       if (tsk->signal && tsk->signal->tty && tsk->signal->tty->name)
+               tty = tsk->signal->tty->name;
+       else
+               tty = "(none)";
+       spin_unlock_irq(&tsk->sighand->siglock);
+
+       audit_log_format(ab,
+                        " ppid=%ld pid=%d auid=%u uid=%u gid=%u"
+                        " euid=%u suid=%u fsuid=%u"
+                        " egid=%u sgid=%u fsgid=%u ses=%u tty=%s",
+                        sys_getppid(),
+                        tsk->pid,
+                        from_kuid(&init_user_ns, audit_get_loginuid(tsk)),
+                        from_kuid(&init_user_ns, cred->uid),
+                        from_kgid(&init_user_ns, cred->gid),
+                        from_kuid(&init_user_ns, cred->euid),
+                        from_kuid(&init_user_ns, cred->suid),
+                        from_kuid(&init_user_ns, cred->fsuid),
+                        from_kgid(&init_user_ns, cred->egid),
+                        from_kgid(&init_user_ns, cred->sgid),
+                        from_kgid(&init_user_ns, cred->fsgid),
+                        audit_get_sessionid(tsk), tty);
+
+       get_task_comm(name, tsk);
+       audit_log_format(ab, " comm=");
+       audit_log_untrustedstring(ab, name);
+
+       if (mm) {
+               down_read(&mm->mmap_sem);
+               if (mm->exe_file)
+                       audit_log_d_path(ab, " exe=", &mm->exe_file->f_path);
+               up_read(&mm->mmap_sem);
+       }
+       audit_log_task_context(ab);
+}
+EXPORT_SYMBOL(audit_log_task_info);
+
 /**
  * audit_log_link_denied - report a link restriction denial
  * @operation: specific link operation
@@ -1451,19 +1626,28 @@ void audit_log_key(struct audit_buffer *ab, char *key)
 void audit_log_link_denied(const char *operation, struct path *link)
 {
        struct audit_buffer *ab;
+       struct audit_names *name;
+
+       name = kzalloc(sizeof(*name), GFP_NOFS);
+       if (!name)
+               return;
 
+       /* Generate AUDIT_ANOM_LINK with subject, operation, outcome. */
        ab = audit_log_start(current->audit_context, GFP_KERNEL,
                             AUDIT_ANOM_LINK);
        if (!ab)
-               return;
-       audit_log_format(ab, "op=%s action=denied", operation);
-       audit_log_format(ab, " pid=%d comm=", current->pid);
-       audit_log_untrustedstring(ab, current->comm);
-       audit_log_d_path(ab, " path=", link);
-       audit_log_format(ab, " dev=");
-       audit_log_untrustedstring(ab, link->dentry->d_inode->i_sb->s_id);
-       audit_log_format(ab, " ino=%lu", link->dentry->d_inode->i_ino);
+               goto out;
+       audit_log_format(ab, "op=%s", operation);
+       audit_log_task_info(ab, current);
+       audit_log_format(ab, " res=0");
        audit_log_end(ab);
+
+       /* Generate AUDIT_PATH record with object. */
+       name->type = AUDIT_TYPE_NORMAL;
+       audit_copy_inode(name, link->dentry, link->dentry->d_inode);
+       audit_log_name(current->audit_context, name, link, 0, NULL);
+out:
+       kfree(name);
 }
 
 /**
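[Illustrative sketch (assumed caller, not part of this diff): the audit_log_session_info()
helper added above lets a record report the subject's auid/ses pair without threading
loginuid and sessionid through every function. The caller below is assumed; only
functions visible in this diff are used.]

	#include <linux/audit.h>
	#include <linux/gfp.h>

	static void example_log_config_change(const char *op)
	{
		struct audit_buffer *ab;

		ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
		if (!ab)
			return;		/* could not start a record; nothing to log */
		audit_log_format(ab, "op=%s", op);
		audit_log_session_info(ab);	/* appends " auid=%u ses=%u" for current */
		audit_log_format(ab, " res=1");
		audit_log_end(ab);
	}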
index 11468d99dad00b9f168071701c35965cf49ae585..1c95131ef760c2f45d140efdccfd2d08636b76d7 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/fs.h>
 #include <linux/audit.h>
 #include <linux/skbuff.h>
+#include <uapi/linux/mqueue.h>
 
 /* 0 = no checking
    1 = put_count checking
 */
 #define AUDIT_DEBUG 0
 
+/* AUDIT_NAMES is the number of slots we reserve in the audit_context
+ * for saving names from getname().  If we get more names we will allocate
+ * a name dynamically and also add those to the list anchored by names_list. */
+#define AUDIT_NAMES    5
+
 /* At task start time, the audit_state is set in the audit_context using
    a per-task filter.  At syscall entry, the audit_state is augmented by
    the syscall filter. */
@@ -59,8 +65,158 @@ struct audit_entry {
        struct audit_krule      rule;
 };
 
+struct audit_cap_data {
+       kernel_cap_t            permitted;
+       kernel_cap_t            inheritable;
+       union {
+               unsigned int    fE;             /* effective bit of file cap */
+               kernel_cap_t    effective;      /* effective set of process */
+       };
+};
+
+/* When fs/namei.c:getname() is called, we store the pointer in name and
+ * we don't let putname() free it (instead we free all of the saved
+ * pointers at syscall exit time).
+ *
+ * Further, in fs/namei.c:path_lookup() we store the inode and device.
+ */
+struct audit_names {
+       struct list_head        list;           /* audit_context->names_list */
+
+       struct filename         *name;
+       int                     name_len;       /* number of chars to log */
+       bool                    name_put;       /* call __putname()? */
+
+       unsigned long           ino;
+       dev_t                   dev;
+       umode_t                 mode;
+       kuid_t                  uid;
+       kgid_t                  gid;
+       dev_t                   rdev;
+       u32                     osid;
+       struct audit_cap_data   fcap;
+       unsigned int            fcap_ver;
+       unsigned char           type;           /* record type */
+       /*
+        * This was an allocated audit_names and not from the array of
+        * names allocated in the task audit context.  Thus this name
+        * should be freed on syscall exit.
+        */
+       bool                    should_free;
+};
+
+/* The per-task audit context. */
+struct audit_context {
+       int                 dummy;      /* must be the first element */
+       int                 in_syscall; /* 1 if task is in a syscall */
+       enum audit_state    state, current_state;
+       unsigned int        serial;     /* serial number for record */
+       int                 major;      /* syscall number */
+       struct timespec     ctime;      /* time of syscall entry */
+       unsigned long       argv[4];    /* syscall arguments */
+       long                return_code;/* syscall return code */
+       u64                 prio;
+       int                 return_valid; /* return code is valid */
+       /*
+        * The names_list is the list of all audit_names collected during this
+        * syscall.  The first AUDIT_NAMES entries in the names_list will
+        * actually be from the preallocated_names array for performance
+        * reasons.  Except during allocation they should never be referenced
+        * through the preallocated_names array and should only be found/used
+        * by running the names_list.
+        */
+       struct audit_names  preallocated_names[AUDIT_NAMES];
+       int                 name_count; /* total records in names_list */
+       struct list_head    names_list; /* struct audit_names->list anchor */
+       char                *filterkey; /* key for rule that triggered record */
+       struct path         pwd;
+       struct audit_aux_data *aux;
+       struct audit_aux_data *aux_pids;
+       struct sockaddr_storage *sockaddr;
+       size_t sockaddr_len;
+                               /* Save things to print about task_struct */
+       pid_t               pid, ppid;
+       kuid_t              uid, euid, suid, fsuid;
+       kgid_t              gid, egid, sgid, fsgid;
+       unsigned long       personality;
+       int                 arch;
+
+       pid_t               target_pid;
+       kuid_t              target_auid;
+       kuid_t              target_uid;
+       unsigned int        target_sessionid;
+       u32                 target_sid;
+       char                target_comm[TASK_COMM_LEN];
+
+       struct audit_tree_refs *trees, *first_trees;
+       struct list_head killed_trees;
+       int tree_count;
+
+       int type;
+       union {
+               struct {
+                       int nargs;
+                       long args[6];
+               } socketcall;
+               struct {
+                       kuid_t                  uid;
+                       kgid_t                  gid;
+                       umode_t                 mode;
+                       u32                     osid;
+                       int                     has_perm;
+                       uid_t                   perm_uid;
+                       gid_t                   perm_gid;
+                       umode_t                 perm_mode;
+                       unsigned long           qbytes;
+               } ipc;
+               struct {
+                       mqd_t                   mqdes;
+                       struct mq_attr          mqstat;
+               } mq_getsetattr;
+               struct {
+                       mqd_t                   mqdes;
+                       int                     sigev_signo;
+               } mq_notify;
+               struct {
+                       mqd_t                   mqdes;
+                       size_t                  msg_len;
+                       unsigned int            msg_prio;
+                       struct timespec         abs_timeout;
+               } mq_sendrecv;
+               struct {
+                       int                     oflag;
+                       umode_t                 mode;
+                       struct mq_attr          attr;
+               } mq_open;
+               struct {
+                       pid_t                   pid;
+                       struct audit_cap_data   cap;
+               } capset;
+               struct {
+                       int                     fd;
+                       int                     flags;
+               } mmap;
+       };
+       int fds[2];
+
+#if AUDIT_DEBUG
+       int                 put_count;
+       int                 ino_count;
+#endif
+};
+
 extern int audit_ever_enabled;
 
+extern void audit_copy_inode(struct audit_names *name,
+                            const struct dentry *dentry,
+                            const struct inode *inode);
+extern void audit_log_cap(struct audit_buffer *ab, char *prefix,
+                         kernel_cap_t *cap);
+extern void audit_log_fcaps(struct audit_buffer *ab, struct audit_names *name);
+extern void audit_log_name(struct audit_context *context,
+                          struct audit_names *n, struct path *path,
+                          int record_num, int *call_panic);
+
 extern int audit_pid;
 
 #define AUDIT_INODE_BUCKETS    32
index 267436826c3bc179678a57de72cf08220fa8faca..83a2970295d19ffdb1bd7417ed2b1b54a815cba1 100644 (file)
@@ -310,121 +310,83 @@ static u32 audit_to_op(u32 op)
        return n;
 }
 
-
-/* Translate struct audit_rule to kernel's rule respresentation.
- * Exists for backward compatibility with userspace. */
-static struct audit_entry *audit_rule_to_entry(struct audit_rule *rule)
+/* check if an audit field is valid */
+static int audit_field_valid(struct audit_entry *entry, struct audit_field *f)
 {
-       struct audit_entry *entry;
-       int err = 0;
-       int i;
-
-       entry = audit_to_entry_common(rule);
-       if (IS_ERR(entry))
-               goto exit_nofree;
-
-       for (i = 0; i < rule->field_count; i++) {
-               struct audit_field *f = &entry->rule.fields[i];
-               u32 n;
-
-               n = rule->fields[i] & (AUDIT_NEGATE|AUDIT_OPERATORS);
-
-               /* Support for legacy operators where
-                * AUDIT_NEGATE bit signifies != and otherwise assumes == */
-               if (n & AUDIT_NEGATE)
-                       f->op = Audit_not_equal;
-               else if (!n)
-                       f->op = Audit_equal;
-               else
-                       f->op = audit_to_op(n);
-
-               entry->rule.vers_ops = (n & AUDIT_OPERATORS) ? 2 : 1;
-
-               f->type = rule->fields[i] & ~(AUDIT_NEGATE|AUDIT_OPERATORS);
-               f->val = rule->values[i];
-               f->uid = INVALID_UID;
-               f->gid = INVALID_GID;
-
-               err = -EINVAL;
-               if (f->op == Audit_bad)
-                       goto exit_free;
-
-               switch(f->type) {
-               default:
-                       goto exit_free;
-               case AUDIT_UID:
-               case AUDIT_EUID:
-               case AUDIT_SUID:
-               case AUDIT_FSUID:
-               case AUDIT_LOGINUID:
-                       /* bit ops not implemented for uid comparisons */
-                       if (f->op == Audit_bitmask || f->op == Audit_bittest)
-                               goto exit_free;
-
-                       f->uid = make_kuid(current_user_ns(), f->val);
-                       if (!uid_valid(f->uid))
-                               goto exit_free;
-                       break;
-               case AUDIT_GID:
-               case AUDIT_EGID:
-               case AUDIT_SGID:
-               case AUDIT_FSGID:
-                       /* bit ops not implemented for gid comparisons */
-                       if (f->op == Audit_bitmask || f->op == Audit_bittest)
-                               goto exit_free;
-
-                       f->gid = make_kgid(current_user_ns(), f->val);
-                       if (!gid_valid(f->gid))
-                               goto exit_free;
-                       break;
-               case AUDIT_PID:
-               case AUDIT_PERS:
-               case AUDIT_MSGTYPE:
-               case AUDIT_PPID:
-               case AUDIT_DEVMAJOR:
-               case AUDIT_DEVMINOR:
-               case AUDIT_EXIT:
-               case AUDIT_SUCCESS:
-                       /* bit ops are only useful on syscall args */
-                       if (f->op == Audit_bitmask || f->op == Audit_bittest)
-                               goto exit_free;
-                       break;
-               case AUDIT_ARG0:
-               case AUDIT_ARG1:
-               case AUDIT_ARG2:
-               case AUDIT_ARG3:
-                       break;
-               /* arch is only allowed to be = or != */
-               case AUDIT_ARCH:
-                       if (f->op != Audit_not_equal && f->op != Audit_equal)
-                               goto exit_free;
-                       entry->rule.arch_f = f;
-                       break;
-               case AUDIT_PERM:
-                       if (f->val & ~15)
-                               goto exit_free;
-                       break;
-               case AUDIT_FILETYPE:
-                       if (f->val & ~S_IFMT)
-                               goto exit_free;
-                       break;
-               case AUDIT_INODE:
-                       err = audit_to_inode(&entry->rule, f);
-                       if (err)
-                               goto exit_free;
-                       break;
-               }
-       }
-
-       if (entry->rule.inode_f && entry->rule.inode_f->op == Audit_not_equal)
-               entry->rule.inode_f = NULL;
-
-exit_nofree:
-       return entry;
+       switch(f->type) {
+       case AUDIT_MSGTYPE:
+               if (entry->rule.listnr != AUDIT_FILTER_TYPE &&
+                   entry->rule.listnr != AUDIT_FILTER_USER)
+                       return -EINVAL;
+               break;
+       }
 
-exit_free:
-       audit_free_rule(entry);
-       return ERR_PTR(err);
+       switch(f->type) {
+       default:
+               return -EINVAL;
+       case AUDIT_UID:
+       case AUDIT_EUID:
+       case AUDIT_SUID:
+       case AUDIT_FSUID:
+       case AUDIT_LOGINUID:
+       case AUDIT_OBJ_UID:
+       case AUDIT_GID:
+       case AUDIT_EGID:
+       case AUDIT_SGID:
+       case AUDIT_FSGID:
+       case AUDIT_OBJ_GID:
+       case AUDIT_PID:
+       case AUDIT_PERS:
+       case AUDIT_MSGTYPE:
+       case AUDIT_PPID:
+       case AUDIT_DEVMAJOR:
+       case AUDIT_DEVMINOR:
+       case AUDIT_EXIT:
+       case AUDIT_SUCCESS:
+               /* bit ops are only useful on syscall args */
+               if (f->op == Audit_bitmask || f->op == Audit_bittest)
+                       return -EINVAL;
+               break;
+       case AUDIT_ARG0:
+       case AUDIT_ARG1:
+       case AUDIT_ARG2:
+       case AUDIT_ARG3:
+       case AUDIT_SUBJ_USER:
+       case AUDIT_SUBJ_ROLE:
+       case AUDIT_SUBJ_TYPE:
+       case AUDIT_SUBJ_SEN:
+       case AUDIT_SUBJ_CLR:
+       case AUDIT_OBJ_USER:
+       case AUDIT_OBJ_ROLE:
+       case AUDIT_OBJ_TYPE:
+       case AUDIT_OBJ_LEV_LOW:
+       case AUDIT_OBJ_LEV_HIGH:
+       case AUDIT_WATCH:
+       case AUDIT_DIR:
+       case AUDIT_FILTERKEY:
+               break;
+       case AUDIT_LOGINUID_SET:
+               if ((f->val != 0) && (f->val != 1))
+                       return -EINVAL;
+       /* FALL THROUGH */
+       case AUDIT_ARCH:
+               if (f->op != Audit_not_equal && f->op != Audit_equal)
+                       return -EINVAL;
+               break;
+       case AUDIT_PERM:
+               if (f->val & ~15)
+                       return -EINVAL;
+               break;
+       case AUDIT_FILETYPE:
+               if (f->val & ~S_IFMT)
+                       return -EINVAL;
+               break;
+       case AUDIT_FIELD_COMPARE:
+               if (f->val > AUDIT_MAX_FIELD_COMPARE)
+                       return -EINVAL;
+               break;
+       }
+       return 0;
 }
 
 /* Translate struct audit_rule_data to kernel's rule representation. */
@@ -459,17 +421,25 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
                f->gid = INVALID_GID;
                f->lsm_str = NULL;
                f->lsm_rule = NULL;
-               switch(f->type) {
+
+               /* Support legacy tests for a valid loginuid */
+               if ((f->type == AUDIT_LOGINUID) && (f->val == 4294967295)) {
+                       f->type = AUDIT_LOGINUID_SET;
+                       f->val = 0;
+               }
+
+               err = audit_field_valid(entry, f);
+               if (err)
+                       goto exit_free;
+
+               err = -EINVAL;
+               switch (f->type) {
+               case AUDIT_LOGINUID:
                case AUDIT_UID:
                case AUDIT_EUID:
                case AUDIT_SUID:
                case AUDIT_FSUID:
-               case AUDIT_LOGINUID:
                case AUDIT_OBJ_UID:
-                       /* bit ops not implemented for uid comparisons */
-                       if (f->op == Audit_bitmask || f->op == Audit_bittest)
-                               goto exit_free;
-
                        f->uid = make_kuid(current_user_ns(), f->val);
                        if (!uid_valid(f->uid))
                                goto exit_free;
@@ -479,27 +449,10 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
                case AUDIT_SGID:
                case AUDIT_FSGID:
                case AUDIT_OBJ_GID:
-                       /* bit ops not implemented for gid comparisons */
-                       if (f->op == Audit_bitmask || f->op == Audit_bittest)
-                               goto exit_free;
-
                        f->gid = make_kgid(current_user_ns(), f->val);
                        if (!gid_valid(f->gid))
                                goto exit_free;
                        break;
-               case AUDIT_PID:
-               case AUDIT_PERS:
-               case AUDIT_MSGTYPE:
-               case AUDIT_PPID:
-               case AUDIT_DEVMAJOR:
-               case AUDIT_DEVMINOR:
-               case AUDIT_EXIT:
-               case AUDIT_SUCCESS:
-               case AUDIT_ARG0:
-               case AUDIT_ARG1:
-               case AUDIT_ARG2:
-               case AUDIT_ARG3:
-                       break;
                case AUDIT_ARCH:
                        entry->rule.arch_f = f;
                        break;
@@ -570,20 +523,6 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
                        entry->rule.buflen += f->val;
                        entry->rule.filterkey = str;
                        break;
-               case AUDIT_PERM:
-                       if (f->val & ~15)
-                               goto exit_free;
-                       break;
-               case AUDIT_FILETYPE:
-                       if (f->val & ~S_IFMT)
-                               goto exit_free;
-                       break;
-               case AUDIT_FIELD_COMPARE:
-                       if (f->val > AUDIT_MAX_FIELD_COMPARE)
-                               goto exit_free;
-                       break;
-               default:
-                       goto exit_free;
                }
        }
 
@@ -613,36 +552,6 @@ static inline size_t audit_pack_string(void **bufp, const char *str)
        return len;
 }
 
-/* Translate kernel rule respresentation to struct audit_rule.
- * Exists for backward compatibility with userspace. */
-static struct audit_rule *audit_krule_to_rule(struct audit_krule *krule)
-{
-       struct audit_rule *rule;
-       int i;
-
-       rule = kzalloc(sizeof(*rule), GFP_KERNEL);
-       if (unlikely(!rule))
-               return NULL;
-
-       rule->flags = krule->flags | krule->listnr;
-       rule->action = krule->action;
-       rule->field_count = krule->field_count;
-       for (i = 0; i < rule->field_count; i++) {
-               rule->values[i] = krule->fields[i].val;
-               rule->fields[i] = krule->fields[i].type;
-
-               if (krule->vers_ops == 1) {
-                       if (krule->fields[i].op == Audit_not_equal)
-                               rule->fields[i] |= AUDIT_NEGATE;
-               } else {
-                       rule->fields[i] |= audit_ops[krule->fields[i].op];
-               }
-       }
-       for (i = 0; i < AUDIT_BITMASK_SIZE; i++) rule->mask[i] = krule->mask[i];
-
-       return rule;
-}
-
 /* Translate kernel rule representation to struct audit_rule_data. */
 static struct audit_rule_data *audit_krule_to_data(struct audit_krule *krule)
 {
@@ -1055,35 +964,6 @@ out:
        return ret;
 }
 
-/* List rules using struct audit_rule.  Exists for backward
- * compatibility with userspace. */
-static void audit_list(int pid, int seq, struct sk_buff_head *q)
-{
-       struct sk_buff *skb;
-       struct audit_krule *r;
-       int i;
-
-       /* This is a blocking read, so use audit_filter_mutex instead of rcu
-        * iterator to sync with list writers. */
-       for (i=0; i<AUDIT_NR_FILTERS; i++) {
-               list_for_each_entry(r, &audit_rules_list[i], list) {
-                       struct audit_rule *rule;
-
-                       rule = audit_krule_to_rule(r);
-                       if (unlikely(!rule))
-                               break;
-                       skb = audit_make_reply(pid, seq, AUDIT_LIST, 0, 1,
-                                        rule, sizeof(*rule));
-                       if (skb)
-                               skb_queue_tail(q, skb);
-                       kfree(rule);
-               }
-       }
-       skb = audit_make_reply(pid, seq, AUDIT_LIST, 1, 1, NULL, 0);
-       if (skb)
-               skb_queue_tail(q, skb);
-}
-
 /* List rules using struct audit_rule_data. */
 static void audit_list_rules(int pid, int seq, struct sk_buff_head *q)
 {
@@ -1113,11 +993,11 @@ static void audit_list_rules(int pid, int seq, struct sk_buff_head *q)
 }
 
 /* Log rule additions and removals */
-static void audit_log_rule_change(kuid_t loginuid, u32 sessionid, u32 sid,
-                                 char *action, struct audit_krule *rule,
-                                 int res)
+static void audit_log_rule_change(char *action, struct audit_krule *rule, int res)
 {
        struct audit_buffer *ab;
+       uid_t loginuid = from_kuid(&init_user_ns, audit_get_loginuid(current));
+       u32 sessionid = audit_get_sessionid(current);
 
        if (!audit_enabled)
                return;
@@ -1125,18 +1005,8 @@ static void audit_log_rule_change(kuid_t loginuid, u32 sessionid, u32 sid,
        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
        if (!ab)
                return;
-       audit_log_format(ab, "auid=%u ses=%u",
-                        from_kuid(&init_user_ns, loginuid), sessionid);
-       if (sid) {
-               char *ctx = NULL;
-               u32 len;
-               if (security_secid_to_secctx(sid, &ctx, &len))
-                       audit_log_format(ab, " ssid=%u", sid);
-               else {
-                       audit_log_format(ab, " subj=%s", ctx);
-                       security_release_secctx(ctx, len);
-               }
-       }
+       audit_log_format(ab, "auid=%u ses=%u" ,loginuid, sessionid);
+       audit_log_task_context(ab);
        audit_log_format(ab, " op=");
        audit_log_string(ab, action);
        audit_log_key(ab, rule->filterkey);
@@ -1155,8 +1025,7 @@ static void audit_log_rule_change(kuid_t loginuid, u32 sessionid, u32 sid,
  * @sessionid: sessionid for netlink audit message
  * @sid: SE Linux Security ID of sender
  */
-int audit_receive_filter(int type, int pid, int seq, void *data,
-                        size_t datasz, kuid_t loginuid, u32 sessionid, u32 sid)
+int audit_receive_filter(int type, int pid, int seq, void *data, size_t datasz)
 {
        struct task_struct *tsk;
        struct audit_netlink_list *dest;
@@ -1164,7 +1033,6 @@ int audit_receive_filter(int type, int pid, int seq, void *data,
        struct audit_entry *entry;
 
        switch (type) {
-       case AUDIT_LIST:
        case AUDIT_LIST_RULES:
                /* We can't just spew out the rules here because we might fill
                 * the available socket buffer space and deadlock waiting for
@@ -1179,10 +1047,7 @@ int audit_receive_filter(int type, int pid, int seq, void *data,
                skb_queue_head_init(&dest->q);
 
                mutex_lock(&audit_filter_mutex);
-               if (type == AUDIT_LIST)
-                       audit_list(pid, seq, &dest->q);
-               else
-                       audit_list_rules(pid, seq, &dest->q);
+               audit_list_rules(pid, seq, &dest->q);
                mutex_unlock(&audit_filter_mutex);
 
                tsk = kthread_run(audit_send_list, dest, "audit_send_list");
@@ -1192,35 +1057,23 @@ int audit_receive_filter(int type, int pid, int seq, void *data,
                        err = PTR_ERR(tsk);
                }
                break;
-       case AUDIT_ADD:
        case AUDIT_ADD_RULE:
-               if (type == AUDIT_ADD)
-                       entry = audit_rule_to_entry(data);
-               else
-                       entry = audit_data_to_entry(data, datasz);
+               entry = audit_data_to_entry(data, datasz);
                if (IS_ERR(entry))
                        return PTR_ERR(entry);
 
                err = audit_add_rule(entry);
-               audit_log_rule_change(loginuid, sessionid, sid, "add rule",
-                                     &entry->rule, !err);
-
+               audit_log_rule_change("add rule", &entry->rule, !err);
                if (err)
                        audit_free_rule(entry);
                break;
-       case AUDIT_DEL:
        case AUDIT_DEL_RULE:
-               if (type == AUDIT_DEL)
-                       entry = audit_rule_to_entry(data);
-               else
-                       entry = audit_data_to_entry(data, datasz);
+               entry = audit_data_to_entry(data, datasz);
                if (IS_ERR(entry))
                        return PTR_ERR(entry);
 
                err = audit_del_rule(entry);
-               audit_log_rule_change(loginuid, sessionid, sid, "remove rule",
-                                     &entry->rule, !err);
-
+               audit_log_rule_change("remove rule", &entry->rule, !err);
                audit_free_rule(entry);
                break;
        default:
@@ -1358,7 +1211,7 @@ int audit_compare_dname_path(const char *dname, const char *path, int parentlen)
        return strncmp(p, dname, dlen);
 }
 
-static int audit_filter_user_rules(struct audit_krule *rule,
+static int audit_filter_user_rules(struct audit_krule *rule, int type,
                                   enum audit_state *state)
 {
        int i;
@@ -1382,6 +1235,13 @@ static int audit_filter_user_rules(struct audit_krule *rule,
                        result = audit_uid_comparator(audit_get_loginuid(current),
                                                  f->op, f->uid);
                        break;
+               case AUDIT_LOGINUID_SET:
+                       result = audit_comparator(audit_loginuid_set(current),
+                                                 f->op, f->val);
+                       break;
+               case AUDIT_MSGTYPE:
+                       result = audit_comparator(type, f->op, f->val);
+                       break;
                case AUDIT_SUBJ_USER:
                case AUDIT_SUBJ_ROLE:
                case AUDIT_SUBJ_TYPE:
@@ -1408,7 +1268,7 @@ static int audit_filter_user_rules(struct audit_krule *rule,
        return 1;
 }
 
-int audit_filter_user(void)
+int audit_filter_user(int type)
 {
        enum audit_state state = AUDIT_DISABLED;
        struct audit_entry *e;
@@ -1416,7 +1276,7 @@ int audit_filter_user(void)
 
        rcu_read_lock();
        list_for_each_entry_rcu(e, &audit_filter_list[AUDIT_FILTER_USER], list) {
-               if (audit_filter_user_rules(&e->rule, &state)) {
+               if (audit_filter_user_rules(&e->rule, type, &state)) {
                        if (state == AUDIT_DISABLED)
                                ret = 0;
                        break;
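[Illustrative sketch (assumed caller, not part of this diff): audit_filter_user()
now receives the netlink message type, so AUDIT_MSGTYPE and AUDIT_LOGINUID_SET
rules can decide whether a userspace-originated message is recorded. The handler
context and the msg_type and text variables below are assumed.]

	if (audit_filter_user(msg_type)) {
		struct audit_buffer *ab;

		ab = audit_log_start(NULL, GFP_KERNEL, msg_type);
		if (ab) {
			audit_log_format(ab, " data=");
			audit_log_untrustedstring(ab, text);	/* text: assumed NUL-terminated payload */
			audit_log_end(ab);
		}
	}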
index c68229411a7c20afbd6903f7272d23afbc0e9ac0..3c8a601324a280fe9224c872f24e2995243da830 100644 (file)
 #define AUDITSC_SUCCESS 1
 #define AUDITSC_FAILURE 2
 
-/* AUDIT_NAMES is the number of slots we reserve in the audit_context
- * for saving names from getname().  If we get more names we will allocate
- * a name dynamically and also add those to the list anchored by names_list. */
-#define AUDIT_NAMES    5
-
 /* no execve audit message should be longer than this (userspace limits) */
 #define MAX_EXECVE_AUDIT_LEN 7500
 
@@ -90,44 +85,6 @@ int audit_n_rules;
 /* determines whether we collect data for signals sent */
 int audit_signals;
 
-struct audit_cap_data {
-       kernel_cap_t            permitted;
-       kernel_cap_t            inheritable;
-       union {
-               unsigned int    fE;             /* effective bit of a file capability */
-               kernel_cap_t    effective;      /* effective set of a process */
-       };
-};
-
-/* When fs/namei.c:getname() is called, we store the pointer in name and
- * we don't let putname() free it (instead we free all of the saved
- * pointers at syscall exit time).
- *
- * Further, in fs/namei.c:path_lookup() we store the inode and device.
- */
-struct audit_names {
-       struct list_head        list;           /* audit_context->names_list */
-       struct filename *name;
-       unsigned long           ino;
-       dev_t                   dev;
-       umode_t                 mode;
-       kuid_t                  uid;
-       kgid_t                  gid;
-       dev_t                   rdev;
-       u32                     osid;
-       struct audit_cap_data    fcap;
-       unsigned int            fcap_ver;
-       int                     name_len;       /* number of name's characters to log */
-       unsigned char           type;           /* record type */
-       bool                    name_put;       /* call __putname() for this name */
-       /*
-        * This was an allocated audit_names and not from the array of
-        * names allocated in the task audit context.  Thus this name
-        * should be freed on syscall exit
-        */
-       bool                    should_free;
-};
-
 struct audit_aux_data {
        struct audit_aux_data   *next;
        int                     type;
@@ -175,106 +132,6 @@ struct audit_tree_refs {
        struct audit_chunk *c[31];
 };
 
-/* The per-task audit context. */
-struct audit_context {
-       int                 dummy;      /* must be the first element */
-       int                 in_syscall; /* 1 if task is in a syscall */
-       enum audit_state    state, current_state;
-       unsigned int        serial;     /* serial number for record */
-       int                 major;      /* syscall number */
-       struct timespec     ctime;      /* time of syscall entry */
-       unsigned long       argv[4];    /* syscall arguments */
-       long                return_code;/* syscall return code */
-       u64                 prio;
-       int                 return_valid; /* return code is valid */
-       /*
-        * The names_list is the list of all audit_names collected during this
-        * syscall.  The first AUDIT_NAMES entries in the names_list will
-        * actually be from the preallocated_names array for performance
-        * reasons.  Except during allocation they should never be referenced
-        * through the preallocated_names array and should only be found/used
-        * by running the names_list.
-        */
-       struct audit_names  preallocated_names[AUDIT_NAMES];
-       int                 name_count; /* total records in names_list */
-       struct list_head    names_list; /* anchor for struct audit_names->list */
-       char *              filterkey;  /* key for rule that triggered record */
-       struct path         pwd;
-       struct audit_aux_data *aux;
-       struct audit_aux_data *aux_pids;
-       struct sockaddr_storage *sockaddr;
-       size_t sockaddr_len;
-                               /* Save things to print about task_struct */
-       pid_t               pid, ppid;
-       kuid_t              uid, euid, suid, fsuid;
-       kgid_t              gid, egid, sgid, fsgid;
-       unsigned long       personality;
-       int                 arch;
-
-       pid_t               target_pid;
-       kuid_t              target_auid;
-       kuid_t              target_uid;
-       unsigned int        target_sessionid;
-       u32                 target_sid;
-       char                target_comm[TASK_COMM_LEN];
-
-       struct audit_tree_refs *trees, *first_trees;
-       struct list_head killed_trees;
-       int tree_count;
-
-       int type;
-       union {
-               struct {
-                       int nargs;
-                       long args[6];
-               } socketcall;
-               struct {
-                       kuid_t                  uid;
-                       kgid_t                  gid;
-                       umode_t                 mode;
-                       u32                     osid;
-                       int                     has_perm;
-                       uid_t                   perm_uid;
-                       gid_t                   perm_gid;
-                       umode_t                 perm_mode;
-                       unsigned long           qbytes;
-               } ipc;
-               struct {
-                       mqd_t                   mqdes;
-                       struct mq_attr          mqstat;
-               } mq_getsetattr;
-               struct {
-                       mqd_t                   mqdes;
-                       int                     sigev_signo;
-               } mq_notify;
-               struct {
-                       mqd_t                   mqdes;
-                       size_t                  msg_len;
-                       unsigned int            msg_prio;
-                       struct timespec         abs_timeout;
-               } mq_sendrecv;
-               struct {
-                       int                     oflag;
-                       umode_t                 mode;
-                       struct mq_attr          attr;
-               } mq_open;
-               struct {
-                       pid_t                   pid;
-                       struct audit_cap_data   cap;
-               } capset;
-               struct {
-                       int                     fd;
-                       int                     flags;
-               } mmap;
-       };
-       int fds[2];
-
-#if AUDIT_DEBUG
-       int                 put_count;
-       int                 ino_count;
-#endif
-};
-
 static inline int open_arg(int flags, int mask)
 {
        int n = ACC_MODE(flags);
@@ -633,9 +490,23 @@ static int audit_filter_rules(struct task_struct *tsk,
                        break;
                case AUDIT_GID:
                        result = audit_gid_comparator(cred->gid, f->op, f->gid);
+                       if (f->op == Audit_equal) {
+                               if (!result)
+                                       result = in_group_p(f->gid);
+                       } else if (f->op == Audit_not_equal) {
+                               if (result)
+                                       result = !in_group_p(f->gid);
+                       }
                        break;
                case AUDIT_EGID:
                        result = audit_gid_comparator(cred->egid, f->op, f->gid);
+                       if (f->op == Audit_equal) {
+                               if (!result)
+                                       result = in_egroup_p(f->gid);
+                       } else if (f->op == Audit_not_equal) {
+                               if (result)
+                                       result = !in_egroup_p(f->gid);
+                       }
                        break;
                case AUDIT_SGID:
                        result = audit_gid_comparator(cred->sgid, f->op, f->gid);
@@ -742,6 +613,9 @@ static int audit_filter_rules(struct task_struct *tsk,
                        if (ctx)
                                result = audit_uid_comparator(tsk->loginuid, f->op, f->uid);
                        break;
+               case AUDIT_LOGINUID_SET:
+                       result = audit_comparator(audit_loginuid_set(tsk), f->op, f->val);
+                       break;
                case AUDIT_SUBJ_USER:
                case AUDIT_SUBJ_ROLE:
                case AUDIT_SUBJ_TYPE:
@@ -987,6 +861,8 @@ static inline void audit_free_names(struct audit_context *context)
 
 #if AUDIT_DEBUG == 2
        if (context->put_count + context->ino_count != context->name_count) {
+               int i = 0;
+
                printk(KERN_ERR "%s:%d(:%d): major=%d in_syscall=%d"
                       " name_count=%d put_count=%d"
                       " ino_count=%d [NOT freeing]\n",
@@ -995,7 +871,7 @@ static inline void audit_free_names(struct audit_context *context)
                       context->name_count, context->put_count,
                       context->ino_count);
                list_for_each_entry(n, &context->names_list, list) {
-                       printk(KERN_ERR "names[%d] = %p = %s\n", i,
+                       printk(KERN_ERR "names[%d] = %p = %s\n", i++,
                               n->name, n->name->name ?: "(null)");
                }
                dump_stack();
@@ -1010,7 +886,7 @@ static inline void audit_free_names(struct audit_context *context)
        list_for_each_entry_safe(n, next, &context->names_list, list) {
                list_del(&n->list);
                if (n->name && n->name_put)
-                       __putname(n->name);
+                       final_putname(n->name);
                if (n->should_free)
                        kfree(n);
        }
@@ -1093,88 +969,6 @@ static inline void audit_free_context(struct audit_context *context)
        kfree(context);
 }
 
-void audit_log_task_context(struct audit_buffer *ab)
-{
-       char *ctx = NULL;
-       unsigned len;
-       int error;
-       u32 sid;
-
-       security_task_getsecid(current, &sid);
-       if (!sid)
-               return;
-
-       error = security_secid_to_secctx(sid, &ctx, &len);
-       if (error) {
-               if (error != -EINVAL)
-                       goto error_path;
-               return;
-       }
-
-       audit_log_format(ab, " subj=%s", ctx);
-       security_release_secctx(ctx, len);
-       return;
-
-error_path:
-       audit_panic("error in audit_log_task_context");
-       return;
-}
-
-EXPORT_SYMBOL(audit_log_task_context);
-
-void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk)
-{
-       const struct cred *cred;
-       char name[sizeof(tsk->comm)];
-       struct mm_struct *mm = tsk->mm;
-       char *tty;
-
-       if (!ab)
-               return;
-
-       /* tsk == current */
-       cred = current_cred();
-
-       spin_lock_irq(&tsk->sighand->siglock);
-       if (tsk->signal && tsk->signal->tty)
-               tty = tsk->signal->tty->name;
-       else
-               tty = "(none)";
-       spin_unlock_irq(&tsk->sighand->siglock);
-
-
-       audit_log_format(ab,
-                        " ppid=%ld pid=%d auid=%u uid=%u gid=%u"
-                        " euid=%u suid=%u fsuid=%u"
-                        " egid=%u sgid=%u fsgid=%u ses=%u tty=%s",
-                        sys_getppid(),
-                        tsk->pid,
-                        from_kuid(&init_user_ns, tsk->loginuid),
-                        from_kuid(&init_user_ns, cred->uid),
-                        from_kgid(&init_user_ns, cred->gid),
-                        from_kuid(&init_user_ns, cred->euid),
-                        from_kuid(&init_user_ns, cred->suid),
-                        from_kuid(&init_user_ns, cred->fsuid),
-                        from_kgid(&init_user_ns, cred->egid),
-                        from_kgid(&init_user_ns, cred->sgid),
-                        from_kgid(&init_user_ns, cred->fsgid),
-                        tsk->sessionid, tty);
-
-       get_task_comm(name, tsk);
-       audit_log_format(ab, " comm=");
-       audit_log_untrustedstring(ab, name);
-
-       if (mm) {
-               down_read(&mm->mmap_sem);
-               if (mm->exe_file)
-                       audit_log_d_path(ab, " exe=", &mm->exe_file->f_path);
-               up_read(&mm->mmap_sem);
-       }
-       audit_log_task_context(ab);
-}
-
-EXPORT_SYMBOL(audit_log_task_info);
-
 static int audit_log_pid_context(struct audit_context *context, pid_t pid,
                                 kuid_t auid, kuid_t uid, unsigned int sessionid,
                                 u32 sid, char *comm)
@@ -1191,12 +985,14 @@ static int audit_log_pid_context(struct audit_context *context, pid_t pid,
        audit_log_format(ab, "opid=%d oauid=%d ouid=%d oses=%d", pid,
                         from_kuid(&init_user_ns, auid),
                         from_kuid(&init_user_ns, uid), sessionid);
-       if (security_secid_to_secctx(sid, &ctx, &len)) {
-               audit_log_format(ab, " obj=(none)");
-               rc = 1;
-       } else {
-               audit_log_format(ab, " obj=%s", ctx);
-               security_release_secctx(ctx, len);
+       if (sid) {
+               if (security_secid_to_secctx(sid, &ctx, &len)) {
+                       audit_log_format(ab, " obj=(none)");
+                       rc = 1;
+               } else {
+                       audit_log_format(ab, " obj=%s", ctx);
+                       security_release_secctx(ctx, len);
+               }
        }
        audit_log_format(ab, " ocomm=");
        audit_log_untrustedstring(ab, comm);
@@ -1390,35 +1186,6 @@ static void audit_log_execve_info(struct audit_context *context,
        kfree(buf);
 }
 
-static void audit_log_cap(struct audit_buffer *ab, char *prefix, kernel_cap_t *cap)
-{
-       int i;
-
-       audit_log_format(ab, " %s=", prefix);
-       CAP_FOR_EACH_U32(i) {
-               audit_log_format(ab, "%08x", cap->cap[(_KERNEL_CAPABILITY_U32S-1) - i]);
-       }
-}
-
-static void audit_log_fcaps(struct audit_buffer *ab, struct audit_names *name)
-{
-       kernel_cap_t *perm = &name->fcap.permitted;
-       kernel_cap_t *inh = &name->fcap.inheritable;
-       int log = 0;
-
-       if (!cap_isclear(*perm)) {
-               audit_log_cap(ab, "cap_fp", perm);
-               log = 1;
-       }
-       if (!cap_isclear(*inh)) {
-               audit_log_cap(ab, "cap_fi", inh);
-               log = 1;
-       }
-
-       if (log)
-               audit_log_format(ab, " cap_fe=%d cap_fver=%x", name->fcap.fE, name->fcap_ver);
-}
-
 static void show_special(struct audit_context *context, int *call_panic)
 {
        struct audit_buffer *ab;
@@ -1516,68 +1283,6 @@ static void show_special(struct audit_context *context, int *call_panic)
        audit_log_end(ab);
 }
 
-static void audit_log_name(struct audit_context *context, struct audit_names *n,
-                          int record_num, int *call_panic)
-{
-       struct audit_buffer *ab;
-       ab = audit_log_start(context, GFP_KERNEL, AUDIT_PATH);
-       if (!ab)
-               return; /* audit_panic has been called */
-
-       audit_log_format(ab, "item=%d", record_num);
-
-       if (n->name) {
-               switch (n->name_len) {
-               case AUDIT_NAME_FULL:
-                       /* log the full path */
-                       audit_log_format(ab, " name=");
-                       audit_log_untrustedstring(ab, n->name->name);
-                       break;
-               case 0:
-                       /* name was specified as a relative path and the
-                        * directory component is the cwd */
-                       audit_log_d_path(ab, " name=", &context->pwd);
-                       break;
-               default:
-                       /* log the name's directory component */
-                       audit_log_format(ab, " name=");
-                       audit_log_n_untrustedstring(ab, n->name->name,
-                                                   n->name_len);
-               }
-       } else
-               audit_log_format(ab, " name=(null)");
-
-       if (n->ino != (unsigned long)-1) {
-               audit_log_format(ab, " inode=%lu"
-                                " dev=%02x:%02x mode=%#ho"
-                                " ouid=%u ogid=%u rdev=%02x:%02x",
-                                n->ino,
-                                MAJOR(n->dev),
-                                MINOR(n->dev),
-                                n->mode,
-                                from_kuid(&init_user_ns, n->uid),
-                                from_kgid(&init_user_ns, n->gid),
-                                MAJOR(n->rdev),
-                                MINOR(n->rdev));
-       }
-       if (n->osid != 0) {
-               char *ctx = NULL;
-               u32 len;
-               if (security_secid_to_secctx(
-                       n->osid, &ctx, &len)) {
-                       audit_log_format(ab, " osid=%u", n->osid);
-                       *call_panic = 2;
-               } else {
-                       audit_log_format(ab, " obj=%s", ctx);
-                       security_release_secctx(ctx, len);
-               }
-       }
-
-       audit_log_fcaps(ab, n);
-
-       audit_log_end(ab);
-}
-
 static void audit_log_exit(struct audit_context *context, struct task_struct *tsk)
 {
        int i, call_panic = 0;
@@ -1695,7 +1400,7 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
 
        i = 0;
        list_for_each_entry(n, &context->names_list, list)
-               audit_log_name(context, n, i++, &call_panic);
+               audit_log_name(context, n, NULL, i++, &call_panic);
 
        /* Send end of event record to help user space know we are finished */
        ab = audit_log_start(context, GFP_KERNEL, AUDIT_EOE);
@@ -2030,18 +1735,18 @@ void audit_putname(struct filename *name)
        BUG_ON(!context);
        if (!context->in_syscall) {
 #if AUDIT_DEBUG == 2
-               printk(KERN_ERR "%s:%d(:%d): __putname(%p)\n",
+               printk(KERN_ERR "%s:%d(:%d): final_putname(%p)\n",
                       __FILE__, __LINE__, context->serial, name);
                if (context->name_count) {
                        struct audit_names *n;
-                       int i;
+                       int i = 0;
 
                        list_for_each_entry(n, &context->names_list, list)
-                               printk(KERN_ERR "name[%d] = %p = %s\n", i,
+                               printk(KERN_ERR "name[%d] = %p = %s\n", i++,
                                       n->name, n->name->name ?: "(null)");
                        }
 #endif
-               __putname(name);
+               final_putname(name);
        }
 #if AUDIT_DEBUG
        else {
@@ -2060,41 +1765,6 @@ void audit_putname(struct filename *name)
 #endif
 }
 
-static inline int audit_copy_fcaps(struct audit_names *name, const struct dentry *dentry)
-{
-       struct cpu_vfs_cap_data caps;
-       int rc;
-
-       if (!dentry)
-               return 0;
-
-       rc = get_vfs_caps_from_disk(dentry, &caps);
-       if (rc)
-               return rc;
-
-       name->fcap.permitted = caps.permitted;
-       name->fcap.inheritable = caps.inheritable;
-       name->fcap.fE = !!(caps.magic_etc & VFS_CAP_FLAGS_EFFECTIVE);
-       name->fcap_ver = (caps.magic_etc & VFS_CAP_REVISION_MASK) >> VFS_CAP_REVISION_SHIFT;
-
-       return 0;
-}
-
-
-/* Copy inode data into an audit_names. */
-static void audit_copy_inode(struct audit_names *name, const struct dentry *dentry,
-                            const struct inode *inode)
-{
-       name->ino   = inode->i_ino;
-       name->dev   = inode->i_sb->s_dev;
-       name->mode  = inode->i_mode;
-       name->uid   = inode->i_uid;
-       name->gid   = inode->i_gid;
-       name->rdev  = inode->i_rdev;
-       security_inode_getsecid(inode, &name->osid);
-       audit_copy_fcaps(name, dentry);
-}
-
 /**
  * __audit_inode - store the inode and device from a lookup
  * @name: name being audited
@@ -2303,7 +1973,7 @@ int audit_set_loginuid(kuid_t loginuid)
        unsigned int sessionid;
 
 #ifdef CONFIG_AUDIT_LOGINUID_IMMUTABLE
-       if (uid_valid(task->loginuid))
+       if (audit_loginuid_set(task))
                return -EPERM;
 #else /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
        if (!capable(CAP_AUDIT_CONTROL))
@@ -2471,17 +2141,20 @@ int __audit_bprm(struct linux_binprm *bprm)
 
 /**
  * audit_socketcall - record audit data for sys_socketcall
- * @nargs: number of args
+ * @nargs: number of args, which should not be more than AUDITSC_ARGS.
  * @args: args array
  *
  */
-void __audit_socketcall(int nargs, unsigned long *args)
+int __audit_socketcall(int nargs, unsigned long *args)
 {
        struct audit_context *context = current->audit_context;
 
+       if (nargs <= 0 || nargs > AUDITSC_ARGS || !args)
+               return -EINVAL;
        context->type = AUDIT_SOCKETCALL;
        context->socketcall.nargs = nargs;
        memcpy(context->socketcall.args, args, nargs * sizeof(unsigned long));
+       return 0;
 }
 
 /**
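The argument check added to __audit_socketcall() above guards the fixed-size args[] array before the memcpy(). A minimal userspace sketch of the same validate-then-copy pattern (hypothetical names, not part of this patch):

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define AUDITSC_ARGS 6                  /* size of the fixed argument array */

struct socketcall_record {
        int nargs;
        unsigned long args[AUDITSC_ARGS];
};

/* Reject out-of-range counts before memcpy() so a bad nargs can never
 * overrun the fixed-size buffer. */
static int record_socketcall(struct socketcall_record *rec, int nargs,
                             const unsigned long *args)
{
        if (nargs <= 0 || nargs > AUDITSC_ARGS || !args)
                return -EINVAL;
        rec->nargs = nargs;
        memcpy(rec->args, args, nargs * sizeof(unsigned long));
        return 0;
}

int main(void)
{
        struct socketcall_record rec;
        unsigned long args[] = { 3, 1, 0 };

        printf("%d\n", record_socketcall(&rec, 3, args));      /* 0 */
        printf("%d\n", record_socketcall(&rec, 9, args));      /* -EINVAL */
        return 0;
}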
index ed35345be536eefb68f1cd585b7c0213f24419b4..53b958fcd639eb2d93d9bed73cffbb9250df7c06 100644 (file)
@@ -613,10 +613,13 @@ static __modinit int add_sysfs_param(struct module_kobject *mk,
                       sizeof(*mk->mp) + sizeof(mk->mp->attrs[0]) * (num+1),
                       GFP_KERNEL);
        if (!new) {
-               kfree(mk->mp);
+               kfree(attrs);
                err = -ENOMEM;
                goto fail;
        }
+       /* Despite looking like the typical realloc() bug, this is safe.
+        * We *want* the old 'attrs' to be freed either way, and we'll store
+        * the new one in the success case. */
        attrs = krealloc(attrs, sizeof(new->grp.attrs[0])*(num+2), GFP_KERNEL);
        if (!attrs) {
                err = -ENOMEM;
index bfd6787b355a13813cb0b19c018d47a46f2bba8a..7078052284fd9eda7bae8a86805054328af7bb4b 100644 (file)
@@ -200,6 +200,7 @@ cond_syscall(sys_perf_event_open);
 /* fanotify! */
 cond_syscall(sys_fanotify_init);
 cond_syscall(sys_fanotify_mark);
+cond_syscall(compat_sys_fanotify_mark);
 
 /* open by handle */
 cond_syscall(sys_name_to_handle_at);
index ebf72358e86aec33c270edd7e8167789fea59ff9..aea4a9ea6fc845b884f84a367589a5945703e761 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/netdevice.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
+#include <linux/compat.h>
 
 #ifdef CONFIG_SYSCTL_SYSCALL
 
@@ -1447,7 +1448,6 @@ SYSCALL_DEFINE1(sysctl, struct __sysctl_args __user *, args)
 
 
 #ifdef CONFIG_COMPAT
-#include <asm/compat.h>
 
 struct compat_sysctl_args {
        compat_uptr_t   name;
@@ -1459,7 +1459,7 @@ struct compat_sysctl_args {
        compat_ulong_t  __unused[4];
 };
 
-asmlinkage long compat_sys_sysctl(struct compat_sysctl_args __user *args)
+COMPAT_SYSCALL_DEFINE1(sysctl, struct compat_sysctl_args __user *, args)
 {
        struct compat_sysctl_args tmp;
        compat_size_t __user *compat_oldlenp;
index 5e9efd4b83a47fda4e70825078baabbc090a9a72..015f85aaca08f5f5d6eb55af1f1aab46670bb03b 100644 (file)
@@ -71,6 +71,7 @@ config TRACE_CLOCK
 config RING_BUFFER
        bool
        select TRACE_CLOCK
+       select IRQ_WORK
 
 config FTRACE_NMI_ENTER
        bool
@@ -107,7 +108,6 @@ config TRACING
        select BINARY_PRINTF
        select EVENT_TRACING
        select TRACE_CLOCK
-       select IRQ_WORK
 
 config GENERIC_TRACER
        bool
index 8a5c017bb50c141bfca4d8206ac2a1805d224185..b549b0f5b9771624159d8dbbf1b6a103127a842f 100644 (file)
 
 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+#define INIT_REGEX_LOCK(opsname)       \
+       .regex_lock     = __MUTEX_INITIALIZER(opsname.regex_lock),
+#else
+#define INIT_REGEX_LOCK(opsname)
+#endif
+
 static struct ftrace_ops ftrace_list_end __read_mostly = {
        .func           = ftrace_stub,
        .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
@@ -131,6 +138,16 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
        while (likely(op = rcu_dereference_raw((op)->next)) &&  \
               unlikely((op) != &ftrace_list_end))
 
+static inline void ftrace_ops_init(struct ftrace_ops *ops)
+{
+#ifdef CONFIG_DYNAMIC_FTRACE
+       if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
+               mutex_init(&ops->regex_lock);
+               ops->flags |= FTRACE_OPS_FL_INITIALIZED;
+       }
+#endif
+}
+
 /**
  * ftrace_nr_registered_ops - return number of ops registered
  *
@@ -907,7 +924,8 @@ static void unregister_ftrace_profiler(void)
 #else
 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
        .func           = function_profile_call,
-       .flags          = FTRACE_OPS_FL_RECURSION_SAFE,
+       .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+       INIT_REGEX_LOCK(ftrace_profile_ops)
 };
 
 static int register_ftrace_profiler(void)
@@ -1103,11 +1121,10 @@ static struct ftrace_ops global_ops = {
        .func                   = ftrace_stub,
        .notrace_hash           = EMPTY_HASH,
        .filter_hash            = EMPTY_HASH,
-       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE,
+       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+       INIT_REGEX_LOCK(global_ops)
 };
 
-static DEFINE_MUTEX(ftrace_regex_lock);
-
 struct ftrace_page {
        struct ftrace_page      *next;
        struct dyn_ftrace       *records;
@@ -1247,6 +1264,7 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
 
 void ftrace_free_filter(struct ftrace_ops *ops)
 {
+       ftrace_ops_init(ops);
        free_ftrace_hash(ops->filter_hash);
        free_ftrace_hash(ops->notrace_hash);
 }
@@ -2441,7 +2459,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
                     !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
 
                    ((iter->flags & FTRACE_ITER_ENABLED) &&
-                    !(rec->flags & ~FTRACE_FL_MASK))) {
+                    !(rec->flags & FTRACE_FL_ENABLED))) {
 
                        rec = NULL;
                        goto retry;
@@ -2624,6 +2642,8 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
        struct ftrace_hash *hash;
        int ret = 0;
 
+       ftrace_ops_init(ops);
+
        if (unlikely(ftrace_disabled))
                return -ENODEV;
 
@@ -2636,28 +2656,26 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
                return -ENOMEM;
        }
 
+       iter->ops = ops;
+       iter->flags = flag;
+
+       mutex_lock(&ops->regex_lock);
+
        if (flag & FTRACE_ITER_NOTRACE)
                hash = ops->notrace_hash;
        else
                hash = ops->filter_hash;
 
-       iter->ops = ops;
-       iter->flags = flag;
-
        if (file->f_mode & FMODE_WRITE) {
-               mutex_lock(&ftrace_lock);
                iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
-               mutex_unlock(&ftrace_lock);
-
                if (!iter->hash) {
                        trace_parser_put(&iter->parser);
                        kfree(iter);
-                       return -ENOMEM;
+                       ret = -ENOMEM;
+                       goto out_unlock;
                }
        }
 
-       mutex_lock(&ftrace_regex_lock);
-
        if ((file->f_mode & FMODE_WRITE) &&
            (file->f_flags & O_TRUNC))
                ftrace_filter_reset(iter->hash);
@@ -2677,7 +2695,9 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
                }
        } else
                file->private_data = iter;
-       mutex_unlock(&ftrace_regex_lock);
+
+ out_unlock:
+       mutex_unlock(&ops->regex_lock);
 
        return ret;
 }
@@ -2910,6 +2930,8 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
 static struct ftrace_ops trace_probe_ops __read_mostly =
 {
        .func           = function_trace_probe_call,
+       .flags          = FTRACE_OPS_FL_INITIALIZED,
+       INIT_REGEX_LOCK(trace_probe_ops)
 };
 
 static int ftrace_probe_registered;
@@ -2919,8 +2941,12 @@ static void __enable_ftrace_function_probe(void)
        int ret;
        int i;
 
-       if (ftrace_probe_registered)
+       if (ftrace_probe_registered) {
+               /* still need to update the function call sites */
+               if (ftrace_enabled)
+                       ftrace_run_update_code(FTRACE_UPDATE_CALLS);
                return;
+       }
 
        for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
                struct hlist_head *hhd = &ftrace_func_hash[i];
@@ -2990,19 +3016,21 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
        if (WARN_ON(not))
                return -EINVAL;
 
-       mutex_lock(&ftrace_lock);
+       mutex_lock(&trace_probe_ops.regex_lock);
 
        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
        if (!hash) {
                count = -ENOMEM;
-               goto out_unlock;
+               goto out;
        }
 
        if (unlikely(ftrace_disabled)) {
                count = -ENODEV;
-               goto out_unlock;
+               goto out;
        }
 
+       mutex_lock(&ftrace_lock);
+
        do_for_each_ftrace_rec(pg, rec) {
 
                if (!ftrace_match_record(rec, NULL, search, len, type))
@@ -3056,6 +3084,8 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 
  out_unlock:
        mutex_unlock(&ftrace_lock);
+ out:
+       mutex_unlock(&trace_probe_ops.regex_lock);
        free_ftrace_hash(hash);
 
        return count;
@@ -3095,7 +3125,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                        return;
        }
 
-       mutex_lock(&ftrace_lock);
+       mutex_lock(&trace_probe_ops.regex_lock);
 
        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
        if (!hash)
@@ -3133,6 +3163,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                        list_add(&entry->free_list, &free_list);
                }
        }
+       mutex_lock(&ftrace_lock);
        __disable_ftrace_function_probe();
        /*
         * Remove after the disable is called. Otherwise, if the last
@@ -3144,9 +3175,10 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                list_del(&entry->free_list);
                ftrace_free_entry(entry);
        }
+       mutex_unlock(&ftrace_lock);
                
  out_unlock:
-       mutex_unlock(&ftrace_lock);
+       mutex_unlock(&trace_probe_ops.regex_lock);
        free_ftrace_hash(hash);
 }
 
@@ -3256,18 +3288,17 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
        if (!cnt)
                return 0;
 
-       mutex_lock(&ftrace_regex_lock);
-
-       ret = -ENODEV;
-       if (unlikely(ftrace_disabled))
-               goto out_unlock;
-
        if (file->f_mode & FMODE_READ) {
                struct seq_file *m = file->private_data;
                iter = m->private;
        } else
                iter = file->private_data;
 
+       if (unlikely(ftrace_disabled))
+               return -ENODEV;
+
+       /* iter->hash is a local copy, so we don't need regex_lock */
+
        parser = &iter->parser;
        read = trace_get_user(parser, ubuf, cnt, ppos);
 
@@ -3276,14 +3307,12 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
                ret = ftrace_process_regex(iter->hash, parser->buffer,
                                           parser->idx, enable);
                trace_parser_clear(parser);
-               if (ret)
-                       goto out_unlock;
+               if (ret < 0)
+                       goto out;
        }
 
        ret = read;
-out_unlock:
-       mutex_unlock(&ftrace_regex_lock);
-
+ out:
        return ret;
 }
 
@@ -3335,16 +3364,19 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
        if (unlikely(ftrace_disabled))
                return -ENODEV;
 
+       mutex_lock(&ops->regex_lock);
+
        if (enable)
                orig_hash = &ops->filter_hash;
        else
                orig_hash = &ops->notrace_hash;
 
        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
-       if (!hash)
-               return -ENOMEM;
+       if (!hash) {
+               ret = -ENOMEM;
+               goto out_regex_unlock;
+       }
 
-       mutex_lock(&ftrace_regex_lock);
        if (reset)
                ftrace_filter_reset(hash);
        if (buf && !ftrace_match_records(hash, buf, len)) {
@@ -3366,7 +3398,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
        mutex_unlock(&ftrace_lock);
 
  out_regex_unlock:
-       mutex_unlock(&ftrace_regex_lock);
+       mutex_unlock(&ops->regex_lock);
 
        free_ftrace_hash(hash);
        return ret;
@@ -3392,6 +3424,7 @@ ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
                         int remove, int reset)
 {
+       ftrace_ops_init(ops);
        return ftrace_set_addr(ops, ip, remove, reset, 1);
 }
 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
@@ -3416,6 +3449,7 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
                       int len, int reset)
 {
+       ftrace_ops_init(ops);
        return ftrace_set_regex(ops, buf, len, reset, 1);
 }
 EXPORT_SYMBOL_GPL(ftrace_set_filter);
@@ -3434,6 +3468,7 @@ EXPORT_SYMBOL_GPL(ftrace_set_filter);
 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
                        int len, int reset)
 {
+       ftrace_ops_init(ops);
        return ftrace_set_regex(ops, buf, len, reset, 0);
 }
 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
@@ -3524,6 +3559,8 @@ ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
 {
        char *func;
 
+       ftrace_ops_init(ops);
+
        while (buf) {
                func = strsep(&buf, ",");
                ftrace_set_regex(ops, func, strlen(func), 0, enable);
@@ -3551,10 +3588,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
        int filter_hash;
        int ret;
 
-       mutex_lock(&ftrace_regex_lock);
        if (file->f_mode & FMODE_READ) {
                iter = m->private;
-
                seq_release(inode, file);
        } else
                iter = file->private_data;
@@ -3567,6 +3602,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 
        trace_parser_put(parser);
 
+       mutex_lock(&iter->ops->regex_lock);
+
        if (file->f_mode & FMODE_WRITE) {
                filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
 
@@ -3584,10 +3621,11 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 
                mutex_unlock(&ftrace_lock);
        }
+
+       mutex_unlock(&iter->ops->regex_lock);
        free_ftrace_hash(iter->hash);
        kfree(iter);
 
-       mutex_unlock(&ftrace_regex_lock);
        return 0;
 }
 
@@ -4126,7 +4164,8 @@ void __init ftrace_init(void)
 
 static struct ftrace_ops global_ops = {
        .func                   = ftrace_stub,
-       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE,
+       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+       INIT_REGEX_LOCK(global_ops)
 };
 
 static int __init ftrace_nodyn_init(void)
@@ -4180,8 +4219,9 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
 }
 
 static struct ftrace_ops control_ops = {
-       .func = ftrace_ops_control_func,
-       .flags = FTRACE_OPS_FL_RECURSION_SAFE,
+       .func   = ftrace_ops_control_func,
+       .flags  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+       INIT_REGEX_LOCK(control_ops)
 };
 
 static inline void
@@ -4539,6 +4579,8 @@ int register_ftrace_function(struct ftrace_ops *ops)
 {
        int ret = -1;
 
+       ftrace_ops_init(ops);
+
        mutex_lock(&ftrace_lock);
 
        ret = __register_ftrace_function(ops);
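The INIT_REGEX_LOCK()/ftrace_ops_init() pair introduced above replaces the single global ftrace_regex_lock with a per-ops mutex: statically defined ops get the lock at build time, dynamically registered ops get it lazily via an INITIALIZED flag. A pthread-based userspace analogue of that lazy-init pattern (hypothetical names, not kernel code):

#include <pthread.h>
#include <stdio.h>

#define OPS_FL_INITIALIZED 0x1

struct ops {
        unsigned long flags;
        pthread_mutex_t regex_lock;
};

/* Statically defined instances initialise the lock up front... */
static struct ops static_ops = {
        .flags      = OPS_FL_INITIALIZED,
        .regex_lock = PTHREAD_MUTEX_INITIALIZER,
};

/* ...while dynamically created ones get it on first use. */
static void ops_init(struct ops *ops)
{
        if (!(ops->flags & OPS_FL_INITIALIZED)) {
                pthread_mutex_init(&ops->regex_lock, NULL);
                ops->flags |= OPS_FL_INITIALIZED;
        }
}

int main(void)
{
        struct ops dynamic_ops = { 0 };

        ops_init(&dynamic_ops);
        pthread_mutex_lock(&dynamic_ops.regex_lock);
        pthread_mutex_unlock(&dynamic_ops.regex_lock);

        pthread_mutex_lock(&static_ops.regex_lock);
        pthread_mutex_unlock(&static_ops.regex_lock);
        printf("both locks usable\n");
        return 0;
}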
index 53582e982e51673e7294080097e9acc7b2bdf8f2..7a0cf68027ccf62118d4eb1da83ec00af3a0b49b 100644 (file)
@@ -251,7 +251,8 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
        switch (enable) {
        case 0:
                /*
-                * When soft_disable is set and enable is cleared, we want
+                * When soft_disable is set and enable is cleared, the sm_ref
+                * reference counter is decremented. If it reaches 0, we want
                 * to clear the SOFT_DISABLED flag but leave the event in the
                 * state that it was. That is, if the event was enabled and
                 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
@@ -263,6 +264,8 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
                 * "soft enable"s (clearing the SOFT_DISABLED bit) wont work.
                 */
                if (soft_disable) {
+                       if (atomic_dec_return(&file->sm_ref) > 0)
+                               break;
                        disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
                        clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
                } else
@@ -291,8 +294,11 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
                 */
                if (!soft_disable)
                        clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
-               else
+               else {
+                       if (atomic_inc_return(&file->sm_ref) > 1)
+                               break;
                        set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
+               }
 
                if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {
 
@@ -623,6 +629,8 @@ event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
        if (file->flags & FTRACE_EVENT_FL_ENABLED) {
                if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED)
                        buf = "0*\n";
+               else if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
+                       buf = "1*\n";
                else
                        buf = "1\n";
        } else
@@ -1521,6 +1529,24 @@ __register_event(struct ftrace_event_call *call, struct module *mod)
        return 0;
 }
 
+static struct ftrace_event_file *
+trace_create_new_event(struct ftrace_event_call *call,
+                      struct trace_array *tr)
+{
+       struct ftrace_event_file *file;
+
+       file = kmem_cache_alloc(file_cachep, GFP_TRACE);
+       if (!file)
+               return NULL;
+
+       file->event_call = call;
+       file->tr = tr;
+       atomic_set(&file->sm_ref, 0);
+       list_add(&file->list, &tr->events);
+
+       return file;
+}
+
 /* Add an event to a trace directory */
 static int
 __trace_add_new_event(struct ftrace_event_call *call,
@@ -1532,14 +1558,10 @@ __trace_add_new_event(struct ftrace_event_call *call,
 {
        struct ftrace_event_file *file;
 
-       file = kmem_cache_alloc(file_cachep, GFP_TRACE);
+       file = trace_create_new_event(call, tr);
        if (!file)
                return -ENOMEM;
 
-       file->event_call = call;
-       file->tr = tr;
-       list_add(&file->list, &tr->events);
-
        return event_create_dir(tr->event_dir, file, id, enable, filter, format);
 }
 
@@ -1554,14 +1576,10 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
 {
        struct ftrace_event_file *file;
 
-       file = kmem_cache_alloc(file_cachep, GFP_TRACE);
+       file = trace_create_new_event(call, tr);
        if (!file)
                return -ENOMEM;
 
-       file->event_call = call;
-       file->tr = tr;
-       list_add(&file->list, &tr->events);
-
        return 0;
 }
 
@@ -2061,8 +2079,18 @@ event_enable_func(struct ftrace_hash *hash,
        if (ret < 0)
                goto out_put;
        ret = register_ftrace_function_probe(glob, ops, data);
-       if (!ret)
+       /*
+        * On success the above returns the number of functions enabled,
+        * but if it didn't find any functions it returns zero.
+        * Consider no functions a failure too.
+        */
+       if (!ret) {
+               ret = -ENOENT;
+               goto out_disable;
+       } else if (ret < 0)
                goto out_disable;
+       /* Just return zero, not the number of enabled functions */
+       ret = 0;
  out:
        mutex_unlock(&event_mutex);
        return ret;
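The sm_ref counter wired in above makes soft enable/disable nest properly: only the first soft enable sets SOFT_MODE and only the last soft disable clears it. A small atomic-counter sketch of that rule (userspace analogue, illustrative names):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int sm_ref;
static bool soft_mode;

/* First caller flips the mode bit; later callers only bump the count. */
static void soft_enable(void)
{
        if (atomic_fetch_add(&sm_ref, 1) + 1 > 1)
                return;
        soft_mode = true;
}

/* Only the last caller clears the mode bit again. */
static void soft_disable(void)
{
        if (atomic_fetch_sub(&sm_ref, 1) - 1 > 0)
                return;
        soft_mode = false;
}

int main(void)
{
        soft_enable();
        soft_enable();
        soft_disable();
        printf("soft_mode=%d (still set, one user left)\n", soft_mode);
        soft_disable();
        printf("soft_mode=%d (last user gone)\n", soft_mode);
        return 0;
}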
index 1865d5f765387f55600d063a633e6a7e79bcae3c..636d45fe69b37a80eefafe6b3aa6d46220f132dd 100644 (file)
@@ -27,7 +27,6 @@
 /**
  * Kprobe event core functions
  */
-
 struct trace_probe {
        struct list_head        list;
        struct kretprobe        rp;     /* Use rp.kp for kprobe use */
@@ -36,6 +35,7 @@ struct trace_probe {
        const char              *symbol;        /* symbol name */
        struct ftrace_event_class       class;
        struct ftrace_event_call        call;
+       struct ftrace_event_file        **files;
        ssize_t                 size;           /* trace entry size */
        unsigned int            nr_args;
        struct probe_arg        args[];
@@ -46,7 +46,7 @@ struct trace_probe {
        (sizeof(struct probe_arg) * (n)))
 
 
-static __kprobes int trace_probe_is_return(struct trace_probe *tp)
+static __kprobes bool trace_probe_is_return(struct trace_probe *tp)
 {
        return tp->rp.handler != NULL;
 }
@@ -183,12 +183,57 @@ static struct trace_probe *find_trace_probe(const char *event,
        return NULL;
 }
 
-/* Enable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */
-static int enable_trace_probe(struct trace_probe *tp, int flag)
+static int trace_probe_nr_files(struct trace_probe *tp)
+{
+       struct ftrace_event_file **file = tp->files;
+       int ret = 0;
+
+       if (file)
+               while (*(file++))
+                       ret++;
+
+       return ret;
+}
+
+static DEFINE_MUTEX(probe_enable_lock);
+
+/*
+ * Enable trace_probe
+ * If @file is NULL, enable the "perf" handler; otherwise enable the "trace" handler.
+ */
+static int
+enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 {
        int ret = 0;
 
-       tp->flags |= flag;
+       mutex_lock(&probe_enable_lock);
+
+       if (file) {
+               struct ftrace_event_file **new, **old = tp->files;
+               int n = trace_probe_nr_files(tp);
+
+               /* one slot for the new file and one for the NULL stopper */
+               new = kzalloc((n + 2) * sizeof(struct ftrace_event_file *),
+                             GFP_KERNEL);
+               if (!new) {
+                       ret = -ENOMEM;
+                       goto out_unlock;
+               }
+               memcpy(new, old, n * sizeof(struct ftrace_event_file *));
+               new[n] = file;
+               /* The last one keeps a NULL */
+
+               rcu_assign_pointer(tp->files, new);
+               tp->flags |= TP_FLAG_TRACE;
+
+               if (old) {
+                       /* Make sure the probe is done with old files */
+                       synchronize_sched();
+                       kfree(old);
+               }
+       } else
+               tp->flags |= TP_FLAG_PROFILE;
+
        if (trace_probe_is_enabled(tp) && trace_probe_is_registered(tp) &&
            !trace_probe_has_gone(tp)) {
                if (trace_probe_is_return(tp))
@@ -197,19 +242,83 @@ static int enable_trace_probe(struct trace_probe *tp, int flag)
                        ret = enable_kprobe(&tp->rp.kp);
        }
 
+ out_unlock:
+       mutex_unlock(&probe_enable_lock);
+
        return ret;
 }
 
-/* Disable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */
-static void disable_trace_probe(struct trace_probe *tp, int flag)
+static int
+trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file)
+{
+       int i;
+
+       if (tp->files) {
+               for (i = 0; tp->files[i]; i++)
+                       if (tp->files[i] == file)
+                               return i;
+       }
+
+       return -1;
+}
+
+/*
+ * Disable trace_probe
+ * If @file is NULL, disable the "perf" handler; otherwise disable the "trace" handler.
+ */
+static int
+disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 {
-       tp->flags &= ~flag;
+       int ret = 0;
+
+       mutex_lock(&probe_enable_lock);
+
+       if (file) {
+               struct ftrace_event_file **new, **old = tp->files;
+               int n = trace_probe_nr_files(tp);
+               int i, j;
+
+               if (n == 0 || trace_probe_file_index(tp, file) < 0) {
+                       ret = -EINVAL;
+                       goto out_unlock;
+               }
+
+               if (n == 1) {   /* Remove the last file */
+                       tp->flags &= ~TP_FLAG_TRACE;
+                       new = NULL;
+               } else {
+                       new = kzalloc(n * sizeof(struct ftrace_event_file *),
+                                     GFP_KERNEL);
+                       if (!new) {
+                               ret = -ENOMEM;
+                               goto out_unlock;
+                       }
+
+                       /* This copy & check loop copies the NULL stopper too */
+                       for (i = 0, j = 0; j < n && i < n + 1; i++)
+                               if (old[i] != file)
+                                       new[j++] = old[i];
+               }
+
+               rcu_assign_pointer(tp->files, new);
+
+               /* Make sure the probe is done with old files */
+               synchronize_sched();
+               kfree(old);
+       } else
+               tp->flags &= ~TP_FLAG_PROFILE;
+
        if (!trace_probe_is_enabled(tp) && trace_probe_is_registered(tp)) {
                if (trace_probe_is_return(tp))
                        disable_kretprobe(&tp->rp);
                else
                        disable_kprobe(&tp->rp.kp);
        }
+
+ out_unlock:
+       mutex_unlock(&probe_enable_lock);
+
+       return ret;
 }
 
 /* Internal register function - just handle k*probes and flags */
@@ -723,9 +832,10 @@ static __kprobes void store_trace_args(int ent_size, struct trace_probe *tp,
 }
 
 /* Kprobe handler */
-static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
+static __kprobes void
+__kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs,
+                   struct ftrace_event_file *ftrace_file)
 {
-       struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
        struct kprobe_trace_entry_head *entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
@@ -733,7 +843,10 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
        unsigned long irq_flags;
        struct ftrace_event_call *call = &tp->call;
 
-       tp->nhit++;
+       WARN_ON(call != ftrace_file->event_call);
+
+       if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
+               return;
 
        local_save_flags(irq_flags);
        pc = preempt_count();
@@ -741,13 +854,14 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
        dsize = __get_data_size(tp, regs);
        size = sizeof(*entry) + tp->size + dsize;
 
-       event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
-                                                 size, irq_flags, pc);
+       event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
+                                               call->event.type,
+                                               size, irq_flags, pc);
        if (!event)
                return;
 
        entry = ring_buffer_event_data(event);
-       entry->ip = (unsigned long)kp->addr;
+       entry->ip = (unsigned long)tp->rp.kp.addr;
        store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
 
        if (!filter_current_check_discard(buffer, call, entry, event))
@@ -755,11 +869,24 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
                                                irq_flags, pc, regs);
 }
 
+static __kprobes void
+kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs)
+{
+       struct ftrace_event_file **file = tp->files;
+
+       /* Note: preempt is already disabled around the kprobe handler */
+       while (*file) {
+               __kprobe_trace_func(tp, regs, *file);
+               file++;
+       }
+}
+
 /* Kretprobe handler */
-static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
-                                         struct pt_regs *regs)
+static __kprobes void
+__kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
+                      struct pt_regs *regs,
+                      struct ftrace_event_file *ftrace_file)
 {
-       struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
        struct kretprobe_trace_entry_head *entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
@@ -767,14 +894,20 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
        unsigned long irq_flags;
        struct ftrace_event_call *call = &tp->call;
 
+       WARN_ON(call != ftrace_file->event_call);
+
+       if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
+               return;
+
        local_save_flags(irq_flags);
        pc = preempt_count();
 
        dsize = __get_data_size(tp, regs);
        size = sizeof(*entry) + tp->size + dsize;
 
-       event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
-                                                 size, irq_flags, pc);
+       event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
+                                               call->event.type,
+                                               size, irq_flags, pc);
        if (!event)
                return;
 
@@ -788,6 +921,19 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
                                                irq_flags, pc, regs);
 }
 
+static __kprobes void
+kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
+                    struct pt_regs *regs)
+{
+       struct ftrace_event_file **file = tp->files;
+
+       /* Note: preempt is already disabled around the kprobe handler */
+       while (*file) {
+               __kretprobe_trace_func(tp, ri, regs, *file);
+               file++;
+       }
+}
+
 /* Event entry printers */
 enum print_line_t
 print_kprobe_event(struct trace_iterator *iter, int flags,
@@ -975,10 +1121,9 @@ static int set_print_fmt(struct trace_probe *tp)
 #ifdef CONFIG_PERF_EVENTS
 
 /* Kprobe profile handler */
-static __kprobes void kprobe_perf_func(struct kprobe *kp,
-                                        struct pt_regs *regs)
+static __kprobes void
+kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs)
 {
-       struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
        struct ftrace_event_call *call = &tp->call;
        struct kprobe_trace_entry_head *entry;
        struct hlist_head *head;
@@ -997,7 +1142,7 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
        if (!entry)
                return;
 
-       entry->ip = (unsigned long)kp->addr;
+       entry->ip = (unsigned long)tp->rp.kp.addr;
        memset(&entry[1], 0, dsize);
        store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
 
@@ -1007,10 +1152,10 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
 }
 
 /* Kretprobe profile handler */
-static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
-                                           struct pt_regs *regs)
+static __kprobes void
+kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri,
+                   struct pt_regs *regs)
 {
-       struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
        struct ftrace_event_call *call = &tp->call;
        struct kretprobe_trace_entry_head *entry;
        struct hlist_head *head;
@@ -1044,20 +1189,19 @@ int kprobe_register(struct ftrace_event_call *event,
                    enum trace_reg type, void *data)
 {
        struct trace_probe *tp = (struct trace_probe *)event->data;
+       struct ftrace_event_file *file = data;
 
        switch (type) {
        case TRACE_REG_REGISTER:
-               return enable_trace_probe(tp, TP_FLAG_TRACE);
+               return enable_trace_probe(tp, file);
        case TRACE_REG_UNREGISTER:
-               disable_trace_probe(tp, TP_FLAG_TRACE);
-               return 0;
+               return disable_trace_probe(tp, file);
 
 #ifdef CONFIG_PERF_EVENTS
        case TRACE_REG_PERF_REGISTER:
-               return enable_trace_probe(tp, TP_FLAG_PROFILE);
+               return enable_trace_probe(tp, NULL);
        case TRACE_REG_PERF_UNREGISTER:
-               disable_trace_probe(tp, TP_FLAG_PROFILE);
-               return 0;
+               return disable_trace_probe(tp, NULL);
        case TRACE_REG_PERF_OPEN:
        case TRACE_REG_PERF_CLOSE:
        case TRACE_REG_PERF_ADD:
@@ -1073,11 +1217,13 @@ int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
 {
        struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
 
+       tp->nhit++;
+
        if (tp->flags & TP_FLAG_TRACE)
-               kprobe_trace_func(kp, regs);
+               kprobe_trace_func(tp, regs);
 #ifdef CONFIG_PERF_EVENTS
        if (tp->flags & TP_FLAG_PROFILE)
-               kprobe_perf_func(kp, regs);
+               kprobe_perf_func(tp, regs);
 #endif
        return 0;       /* We don't tweek kernel, so just return 0 */
 }
@@ -1087,11 +1233,13 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
 {
        struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
 
+       tp->nhit++;
+
        if (tp->flags & TP_FLAG_TRACE)
-               kretprobe_trace_func(ri, regs);
+               kretprobe_trace_func(tp, ri, regs);
 #ifdef CONFIG_PERF_EVENTS
        if (tp->flags & TP_FLAG_PROFILE)
-               kretprobe_perf_func(ri, regs);
+               kretprobe_perf_func(tp, ri, regs);
 #endif
        return 0;       /* We don't tweek kernel, so just return 0 */
 }
@@ -1189,11 +1337,24 @@ static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
        return a1 + a2 + a3 + a4 + a5 + a6;
 }
 
+static struct ftrace_event_file *
+find_trace_probe_file(struct trace_probe *tp, struct trace_array *tr)
+{
+       struct ftrace_event_file *file;
+
+       list_for_each_entry(file, &tr->events, list)
+               if (file->event_call == &tp->call)
+                       return file;
+
+       return NULL;
+}
+
 static __init int kprobe_trace_self_tests_init(void)
 {
        int ret, warn = 0;
        int (*target)(int, int, int, int, int, int);
        struct trace_probe *tp;
+       struct ftrace_event_file *file;
 
        target = kprobe_trace_selftest_target;
 
@@ -1203,31 +1364,43 @@ static __init int kprobe_trace_self_tests_init(void)
                                  "$stack $stack0 +0($stack)",
                                  create_trace_probe);
        if (WARN_ON_ONCE(ret)) {
-               pr_warning("error on probing function entry.\n");
+               pr_warn("error on probing function entry.\n");
                warn++;
        } else {
                /* Enable trace point */
                tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
                if (WARN_ON_ONCE(tp == NULL)) {
-                       pr_warning("error on getting new probe.\n");
+                       pr_warn("error on getting new probe.\n");
                        warn++;
-               } else
-                       enable_trace_probe(tp, TP_FLAG_TRACE);
+               } else {
+                       file = find_trace_probe_file(tp, top_trace_array());
+                       if (WARN_ON_ONCE(file == NULL)) {
+                               pr_warn("error on getting probe file.\n");
+                               warn++;
+                       } else
+                               enable_trace_probe(tp, file);
+               }
        }
 
        ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
                                  "$retval", create_trace_probe);
        if (WARN_ON_ONCE(ret)) {
-               pr_warning("error on probing function return.\n");
+               pr_warn("error on probing function return.\n");
                warn++;
        } else {
                /* Enable trace point */
                tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
                if (WARN_ON_ONCE(tp == NULL)) {
-                       pr_warning("error on getting new probe.\n");
+                       pr_warn("error on getting 2nd new probe.\n");
                        warn++;
-               } else
-                       enable_trace_probe(tp, TP_FLAG_TRACE);
+               } else {
+                       file = find_trace_probe_file(tp, top_trace_array());
+                       if (WARN_ON_ONCE(file == NULL)) {
+                               pr_warn("error on getting probe file.\n");
+                               warn++;
+                       } else
+                               enable_trace_probe(tp, file);
+               }
        }
 
        if (warn)
@@ -1238,27 +1411,39 @@ static __init int kprobe_trace_self_tests_init(void)
        /* Disable trace points before removing it */
        tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
        if (WARN_ON_ONCE(tp == NULL)) {
-               pr_warning("error on getting test probe.\n");
+               pr_warn("error on getting test probe.\n");
                warn++;
-       } else
-               disable_trace_probe(tp, TP_FLAG_TRACE);
+       } else {
+               file = find_trace_probe_file(tp, top_trace_array());
+               if (WARN_ON_ONCE(file == NULL)) {
+                       pr_warn("error on getting probe file.\n");
+                       warn++;
+               } else
+                       disable_trace_probe(tp, file);
+       }
 
        tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
        if (WARN_ON_ONCE(tp == NULL)) {
-               pr_warning("error on getting 2nd test probe.\n");
+               pr_warn("error on getting 2nd test probe.\n");
                warn++;
-       } else
-               disable_trace_probe(tp, TP_FLAG_TRACE);
+       } else {
+               file = find_trace_probe_file(tp, top_trace_array());
+               if (WARN_ON_ONCE(file == NULL)) {
+                       pr_warn("error on getting probe file.\n");
+                       warn++;
+               } else
+                       disable_trace_probe(tp, file);
+       }
 
        ret = traceprobe_command("-:testprobe", create_trace_probe);
        if (WARN_ON_ONCE(ret)) {
-               pr_warning("error on deleting a probe.\n");
+               pr_warn("error on deleting a probe.\n");
                warn++;
        }
 
        ret = traceprobe_command("-:testprobe2", create_trace_probe);
        if (WARN_ON_ONCE(ret)) {
-               pr_warning("error on deleting a probe.\n");
+               pr_warn("error on deleting a probe.\n");
                warn++;
        }
 
index ace5e55fe5a32ed01fd8ca3c0c63c802632ac547..061523eb52a1d9362ced1be114eec8e56ad734d5 100644 (file)
@@ -597,7 +597,15 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
        struct hci_dev *hdev = req->hdev;
        u8 p;
 
-       /* Only send HCI_Delete_Stored_Link_Key if it is supported */
+       /* Some Broadcom based Bluetooth controllers do not support the
+        * Delete Stored Link Key command. They are clearly indicating its
+        * absence in the bit mask of supported commands.
+        *
+        * Check the supported commands and send it only if the command is
+        * marked as supported. If not supported, assume that the controller
+        * does not have actual support for stored link keys, which makes this
+        * command redundant anyway.
+        */
        if (hdev->commands[6] & 0x80) {
                struct hci_cp_delete_stored_link_key cp;
 
@@ -751,7 +759,7 @@ void hci_discovery_set_state(struct hci_dev *hdev, int state)
        hdev->discovery.state = state;
 }
 
-static void inquiry_cache_flush(struct hci_dev *hdev)
+void hci_inquiry_cache_flush(struct hci_dev *hdev)
 {
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;
@@ -964,7 +972,7 @@ int hci_inquiry(void __user *arg)
        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
-               inquiry_cache_flush(hdev);
+               hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);
@@ -1201,8 +1209,6 @@ static int hci_dev_do_close(struct hci_dev *hdev)
 {
        BT_DBG("%s %p", hdev->name, hdev);
 
-       cancel_work_sync(&hdev->le_scan);
-
        cancel_delayed_work(&hdev->power_off);
 
        hci_req_cancel(hdev, ENODEV);
@@ -1230,7 +1236,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
        cancel_delayed_work_sync(&hdev->le_scan_disable);
 
        hci_dev_lock(hdev);
-       inquiry_cache_flush(hdev);
+       hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);
 
@@ -1331,7 +1337,7 @@ int hci_dev_reset(__u16 dev)
        skb_queue_purge(&hdev->cmd_q);
 
        hci_dev_lock(hdev);
-       inquiry_cache_flush(hdev);
+       hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);
 
@@ -1991,80 +1997,59 @@ int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
        return mgmt_device_unblocked(hdev, bdaddr, type);
 }
 
-static void le_scan_param_req(struct hci_request *req, unsigned long opt)
+static void inquiry_complete(struct hci_dev *hdev, u8 status)
 {
-       struct le_scan_params *param =  (struct le_scan_params *) opt;
-       struct hci_cp_le_set_scan_param cp;
-
-       memset(&cp, 0, sizeof(cp));
-       cp.type = param->type;
-       cp.interval = cpu_to_le16(param->interval);
-       cp.window = cpu_to_le16(param->window);
+       if (status) {
+               BT_ERR("Failed to start inquiry: status %d", status);
 
-       hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
-}
-
-static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
-{
-       struct hci_cp_le_set_scan_enable cp;
-
-       memset(&cp, 0, sizeof(cp));
-       cp.enable = LE_SCAN_ENABLE;
-       cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
-
-       hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
+               hci_dev_lock(hdev);
+               hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+               hci_dev_unlock(hdev);
+               return;
+       }
 }
 
-static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
-                         u16 window, int timeout)
+static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
 {
-       long timeo = msecs_to_jiffies(3000);
-       struct le_scan_params param;
+       /* General inquiry access code (GIAC) */
+       u8 lap[3] = { 0x33, 0x8b, 0x9e };
+       struct hci_request req;
+       struct hci_cp_inquiry cp;
        int err;
 
-       BT_DBG("%s", hdev->name);
-
-       if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
-               return -EINPROGRESS;
-
-       param.type = type;
-       param.interval = interval;
-       param.window = window;
-
-       hci_req_lock(hdev);
-
-       err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
-                            timeo);
-       if (!err)
-               err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);
-
-       hci_req_unlock(hdev);
+       if (status) {
+               BT_ERR("Failed to disable LE scanning: status %d", status);
+               return;
+       }
 
-       if (err < 0)
-               return err;
+       switch (hdev->discovery.type) {
+       case DISCOV_TYPE_LE:
+               hci_dev_lock(hdev);
+               hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+               hci_dev_unlock(hdev);
+               break;
 
-       queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
-                          timeout);
+       case DISCOV_TYPE_INTERLEAVED:
+               hci_req_init(&req, hdev);
 
-       return 0;
-}
+               memset(&cp, 0, sizeof(cp));
+               memcpy(&cp.lap, lap, sizeof(cp.lap));
+               cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
+               hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
 
-int hci_cancel_le_scan(struct hci_dev *hdev)
-{
-       BT_DBG("%s", hdev->name);
+               hci_dev_lock(hdev);
 
-       if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
-               return -EALREADY;
+               hci_inquiry_cache_flush(hdev);
 
-       if (cancel_delayed_work(&hdev->le_scan_disable)) {
-               struct hci_cp_le_set_scan_enable cp;
+               err = hci_req_run(&req, inquiry_complete);
+               if (err) {
+                       BT_ERR("Inquiry request failed: err %d", err);
+                       hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+               }
 
-               /* Send HCI command to disable LE Scan */
-               memset(&cp, 0, sizeof(cp));
-               hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
+               hci_dev_unlock(hdev);
+               break;
        }
-
-       return 0;
 }
 
 static void le_scan_disable_work(struct work_struct *work)
@@ -2072,46 +2057,20 @@ static void le_scan_disable_work(struct work_struct *work)
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            le_scan_disable.work);
        struct hci_cp_le_set_scan_enable cp;
+       struct hci_request req;
+       int err;
 
        BT_DBG("%s", hdev->name);
 
-       memset(&cp, 0, sizeof(cp));
-
-       hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
-}
-
-static void le_scan_work(struct work_struct *work)
-{
-       struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
-       struct le_scan_params *param = &hdev->le_scan_params;
+       hci_req_init(&req, hdev);
 
-       BT_DBG("%s", hdev->name);
+       memset(&cp, 0, sizeof(cp));
+       cp.enable = LE_SCAN_DISABLE;
+       hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 
-       hci_do_le_scan(hdev, param->type, param->interval, param->window,
-                      param->timeout);
-}
-
-int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
-               int timeout)
-{
-       struct le_scan_params *param = &hdev->le_scan_params;
-
-       BT_DBG("%s", hdev->name);
-
-       if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
-               return -ENOTSUPP;
-
-       if (work_busy(&hdev->le_scan))
-               return -EINPROGRESS;
-
-       param->type = type;
-       param->interval = interval;
-       param->window = window;
-       param->timeout = timeout;
-
-       queue_work(system_long_wq, &hdev->le_scan);
-
-       return 0;
+       err = hci_req_run(&req, le_scan_disable_work_complete);
+       if (err)
+               BT_ERR("Disable LE scanning request failed: err %d", err);
 }
 
 /* Alloc HCI device */
@@ -2148,7 +2107,6 @@ struct hci_dev *hci_alloc_dev(void)
        INIT_WORK(&hdev->cmd_work, hci_cmd_work);
        INIT_WORK(&hdev->tx_work, hci_tx_work);
        INIT_WORK(&hdev->power_on, hci_power_on);
-       INIT_WORK(&hdev->le_scan, le_scan_work);
 
        INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
        INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
@@ -3551,36 +3509,6 @@ static void hci_cmd_work(struct work_struct *work)
        }
 }
 
-int hci_do_inquiry(struct hci_dev *hdev, u8 length)
-{
-       /* General inquiry access code (GIAC) */
-       u8 lap[3] = { 0x33, 0x8b, 0x9e };
-       struct hci_cp_inquiry cp;
-
-       BT_DBG("%s", hdev->name);
-
-       if (test_bit(HCI_INQUIRY, &hdev->flags))
-               return -EINPROGRESS;
-
-       inquiry_cache_flush(hdev);
-
-       memset(&cp, 0, sizeof(cp));
-       memcpy(&cp.lap, lap, sizeof(cp.lap));
-       cp.length  = length;
-
-       return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
-}
-
-int hci_cancel_inquiry(struct hci_dev *hdev)
-{
-       BT_DBG("%s", hdev->name);
-
-       if (!test_bit(HCI_INQUIRY, &hdev->flags))
-               return -EALREADY;
-
-       return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
-}
-
 u8 bdaddr_to_le(u8 bdaddr_type)
 {
        switch (bdaddr_type) {
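The change to hci_init3_req() near the top of this file only sends Delete_Stored_Link_Key when the matching bit is set in the controller's supported-commands mask (hdev->commands[6] & 0x80). A generic sketch of that kind of byte-array bitmap test (userspace, illustrative; the octet/bit layout is an assumption about the HCI mask):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Test bit 'nr' in a byte array laid out LSB-first within each byte,
 * the same shape as the HCI supported-commands mask: a 0x80 test on
 * byte 6 corresponds to bit number 6 * 8 + 7. */
static bool cmd_supported(const uint8_t *commands, unsigned int nr)
{
        return commands[nr / 8] & (1u << (nr % 8));
}

int main(void)
{
        uint8_t commands[64] = { 0 };

        commands[6] |= 0x80;    /* pretend the command is listed as supported */
        printf("supported: %d\n", cmd_supported(commands, 6 * 8 + 7));
        return 0;
}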
index b93cd2eb5d58f1d2cfa9f8d0ea81967166747eca..0437200d92f45ca90bb09f3cad13d15c40a851de 100644 (file)
@@ -40,21 +40,13 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
 
        BT_DBG("%s status 0x%2.2x", hdev->name, status);
 
-       if (status) {
-               hci_dev_lock(hdev);
-               mgmt_stop_discovery_failed(hdev, status);
-               hci_dev_unlock(hdev);
+       if (status)
                return;
-       }
 
        clear_bit(HCI_INQUIRY, &hdev->flags);
        smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
        wake_up_bit(&hdev->flags, HCI_INQUIRY);
 
-       hci_dev_lock(hdev);
-       hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
-       hci_dev_unlock(hdev);
-
        hci_conn_check_pending(hdev);
 }
 
@@ -937,20 +929,6 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
        hci_dev_unlock(hdev);
 }
 
-static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
-{
-       __u8 status = *((__u8 *) skb->data);
-
-       BT_DBG("%s status 0x%2.2x", hdev->name, status);
-
-       if (status) {
-               hci_dev_lock(hdev);
-               mgmt_start_discovery_failed(hdev, status);
-               hci_dev_unlock(hdev);
-               return;
-       }
-}
-
 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
                                      struct sk_buff *skb)
 {
@@ -963,41 +941,16 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
        if (!cp)
                return;
 
+       if (status)
+               return;
+
        switch (cp->enable) {
        case LE_SCAN_ENABLE:
-               if (status) {
-                       hci_dev_lock(hdev);
-                       mgmt_start_discovery_failed(hdev, status);
-                       hci_dev_unlock(hdev);
-                       return;
-               }
-
                set_bit(HCI_LE_SCAN, &hdev->dev_flags);
-
-               hci_dev_lock(hdev);
-               hci_discovery_set_state(hdev, DISCOVERY_FINDING);
-               hci_dev_unlock(hdev);
                break;
 
        case LE_SCAN_DISABLE:
-               if (status) {
-                       hci_dev_lock(hdev);
-                       mgmt_stop_discovery_failed(hdev, status);
-                       hci_dev_unlock(hdev);
-                       return;
-               }
-
                clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
-
-               if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
-                   hdev->discovery.state == DISCOVERY_FINDING) {
-                       mgmt_interleaved_discovery(hdev);
-               } else {
-                       hci_dev_lock(hdev);
-                       hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
-                       hci_dev_unlock(hdev);
-               }
-
                break;
 
        default:
@@ -1077,18 +1030,10 @@ static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
 
        if (status) {
                hci_conn_check_pending(hdev);
-               hci_dev_lock(hdev);
-               if (test_bit(HCI_MGMT, &hdev->dev_flags))
-                       mgmt_start_discovery_failed(hdev, status);
-               hci_dev_unlock(hdev);
                return;
        }
 
        set_bit(HCI_INQUIRY, &hdev->flags);
-
-       hci_dev_lock(hdev);
-       hci_discovery_set_state(hdev, DISCOVERY_FINDING);
-       hci_dev_unlock(hdev);
 }
 
 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
@@ -2298,10 +2243,6 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
                hci_cc_user_passkey_neg_reply(hdev, skb);
                break;
 
-       case HCI_OP_LE_SET_SCAN_PARAM:
-               hci_cc_le_set_scan_param(hdev, skb);
-               break;
-
        case HCI_OP_LE_SET_ADV_ENABLE:
                hci_cc_le_set_adv_enable(hdev, skb);
                break;
@@ -2670,7 +2611,7 @@ static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 
        BT_DBG("%s", hdev->name);
 
-       if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
+       if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                return;
 
        hci_dev_lock(hdev);
@@ -2746,7 +2687,7 @@ static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
                hci_conn_drop(conn);
        }
 
-       if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
+       if (test_bit(HCI_MGMT, &hdev->dev_flags))
                hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
                                 ev->key_type, pin_len);
 
index 940f5acb6694c3901c0607df32ecc8fef1e5f224..f13a8da441a82571d10c04a3dd6d14caa2f65c0c 100644 (file)
@@ -76,25 +76,19 @@ static void hidp_copy_session(struct hidp_session *session, struct hidp_conninfo
        ci->flags = session->flags;
        ci->state = BT_CONNECTED;
 
-       ci->vendor  = 0x0000;
-       ci->product = 0x0000;
-       ci->version = 0x0000;
-
        if (session->input) {
                ci->vendor  = session->input->id.vendor;
                ci->product = session->input->id.product;
                ci->version = session->input->id.version;
                if (session->input->name)
-                       strncpy(ci->name, session->input->name, 128);
+                       strlcpy(ci->name, session->input->name, 128);
                else
-                       strncpy(ci->name, "HID Boot Device", 128);
-       }
-
-       if (session->hid) {
+                       strlcpy(ci->name, "HID Boot Device", 128);
+       } else if (session->hid) {
                ci->vendor  = session->hid->vendor;
                ci->product = session->hid->product;
                ci->version = session->hid->version;
-               strncpy(ci->name, session->hid->name, 128);
+               strlcpy(ci->name, session->hid->name, 128);
        }
 }
 
index 68843a28a7af6a3e52f92f0cde033e03b031560a..8c3499bec89319289073b2f1e7f3f202cd179ca6 100644 (file)
@@ -504,8 +504,10 @@ void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
                if (conn->hcon->type == LE_LINK) {
                        /* LE connection */
                        chan->omtu = L2CAP_DEFAULT_MTU;
-                       chan->scid = L2CAP_CID_LE_DATA;
-                       chan->dcid = L2CAP_CID_LE_DATA;
+                       if (chan->dcid == L2CAP_CID_ATT)
+                               chan->scid = L2CAP_CID_ATT;
+                       else
+                               chan->scid = l2cap_alloc_cid(conn);
                } else {
                        /* Alloc CID for connection-oriented socket */
                        chan->scid = l2cap_alloc_cid(conn);
@@ -543,6 +545,8 @@ void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
 
        l2cap_chan_hold(chan);
 
+       hci_conn_hold(conn->hcon);
+
        list_add(&chan->list, &conn->chan_l);
 }
 
@@ -1338,17 +1342,21 @@ static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
 
 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
 {
-       struct sock *parent, *sk;
+       struct sock *parent;
        struct l2cap_chan *chan, *pchan;
 
        BT_DBG("");
 
        /* Check if we have socket listening on cid */
-       pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
+       pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
                                          conn->src, conn->dst);
        if (!pchan)
                return;
 
+       /* Client ATT sockets should override the server one */
+       if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
+               return;
+
        parent = pchan->sk;
 
        lock_sock(parent);
@@ -1357,17 +1365,12 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
        if (!chan)
                goto clean;
 
-       sk = chan->sk;
-
-       hci_conn_hold(conn->hcon);
-       conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
-
-       bacpy(&bt_sk(sk)->src, conn->src);
-       bacpy(&bt_sk(sk)->dst, conn->dst);
+       chan->dcid = L2CAP_CID_ATT;
 
-       l2cap_chan_add(conn, chan);
+       bacpy(&bt_sk(chan->sk)->src, conn->src);
+       bacpy(&bt_sk(chan->sk)->dst, conn->dst);
 
-       l2cap_chan_ready(chan);
+       __l2cap_chan_add(conn, chan);
 
 clean:
        release_sock(parent);
@@ -1380,14 +1383,17 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
 
        BT_DBG("conn %p", conn);
 
-       if (!hcon->out && hcon->type == LE_LINK)
-               l2cap_le_conn_ready(conn);
-
+       /* For outgoing pairing which doesn't necessarily have an
+        * associated socket (e.g. mgmt_pair_device).
+        */
        if (hcon->out && hcon->type == LE_LINK)
                smp_conn_security(hcon, hcon->pending_sec_level);
 
        mutex_lock(&conn->chan_lock);
 
+       if (hcon->type == LE_LINK)
+               l2cap_le_conn_ready(conn);
+
        list_for_each_entry(chan, &conn->chan_l, list) {
 
                l2cap_chan_lock(chan);
@@ -1792,7 +1798,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
 
        auth_type = l2cap_get_auth_type(chan);
 
-       if (chan->dcid == L2CAP_CID_LE_DATA)
+       if (bdaddr_type_is_le(dst_type))
                hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
                                   chan->sec_level, auth_type);
        else
@@ -1811,16 +1817,10 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
                goto done;
        }
 
-       if (hcon->type == LE_LINK) {
-               err = 0;
-
-               if (!list_empty(&conn->chan_l)) {
-                       err = -EBUSY;
-                       hci_conn_drop(hcon);
-               }
-
-               if (err)
-                       goto done;
+       if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
+               hci_conn_drop(hcon);
+               err = -EBUSY;
+               goto done;
        }
 
        /* Update source addr of the socket */
@@ -1830,6 +1830,9 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
        l2cap_chan_add(conn, chan);
        l2cap_chan_lock(chan);
 
+       /* l2cap_chan_add takes its own ref so we can drop this one */
+       hci_conn_drop(hcon);
+
        l2cap_state_change(chan, BT_CONNECT);
        __set_chan_timer(chan, sk->sk_sndtimeo);
 
@@ -3751,8 +3754,6 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
 
        sk = chan->sk;
 
-       hci_conn_hold(conn->hcon);
-
        bacpy(&bt_sk(sk)->src, conn->src);
        bacpy(&bt_sk(sk)->dst, conn->dst);
        chan->psm  = psm;
@@ -5292,6 +5293,51 @@ static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
        }
 }
 
+static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
+                                       struct sk_buff *skb)
+{
+       u8 *data = skb->data;
+       int len = skb->len;
+       struct l2cap_cmd_hdr cmd;
+       int err;
+
+       l2cap_raw_recv(conn, skb);
+
+       while (len >= L2CAP_CMD_HDR_SIZE) {
+               u16 cmd_len;
+               memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
+               data += L2CAP_CMD_HDR_SIZE;
+               len  -= L2CAP_CMD_HDR_SIZE;
+
+               cmd_len = le16_to_cpu(cmd.len);
+
+               BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
+                      cmd.ident);
+
+               if (cmd_len > len || !cmd.ident) {
+                       BT_DBG("corrupted command");
+                       break;
+               }
+
+               err = l2cap_le_sig_cmd(conn, &cmd, data);
+               if (err) {
+                       struct l2cap_cmd_rej_unk rej;
+
+                       BT_ERR("Wrong link type (%d)", err);
+
+                       /* FIXME: Map err to a valid reason */
+                       rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
+                       l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
+                                      sizeof(rej), &rej);
+               }
+
+               data += cmd_len;
+               len  -= cmd_len;
+       }
+
+       kfree_skb(skb);
+}
+
 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
                                     struct sk_buff *skb)
 {
@@ -5318,11 +5364,7 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
                        break;
                }
 
-               if (conn->hcon->type == LE_LINK)
-                       err = l2cap_le_sig_cmd(conn, &cmd, data);
-               else
-                       err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
-
+               err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
                if (err) {
                        struct l2cap_cmd_rej_unk rej;
 
@@ -6356,16 +6398,13 @@ static void l2cap_att_channel(struct l2cap_conn *conn,
 {
        struct l2cap_chan *chan;
 
-       chan = l2cap_global_chan_by_scid(0, L2CAP_CID_LE_DATA,
+       chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
                                         conn->src, conn->dst);
        if (!chan)
                goto drop;
 
        BT_DBG("chan %p, len %d", chan, skb->len);
 
-       if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
-               goto drop;
-
        if (chan->imtu < skb->len)
                goto drop;
 
@@ -6395,6 +6434,8 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
 
        switch (cid) {
        case L2CAP_CID_LE_SIGNALING:
+               l2cap_le_sig_channel(conn, skb);
+               break;
        case L2CAP_CID_SIGNALING:
                l2cap_sig_channel(conn, skb);
                break;
@@ -6405,7 +6446,7 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
                l2cap_conless_channel(conn, psm, skb);
                break;
 
-       case L2CAP_CID_LE_DATA:
+       case L2CAP_CID_ATT:
                l2cap_att_channel(conn, skb);
                break;
 
@@ -6531,7 +6572,7 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
                        continue;
                }
 
-               if (chan->scid == L2CAP_CID_LE_DATA) {
+               if (chan->scid == L2CAP_CID_ATT) {
                        if (!status && encrypt) {
                                chan->sec_level = hcon->sec_level;
                                l2cap_chan_ready(chan);
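
Note on the L2CAP core hunks above: the fixed LE channel is now named L2CAP_CID_ATT rather than L2CAP_CID_LE_DATA, LE channels other than the fixed ATT channel get an allocated source CID, __l2cap_chan_add() takes the hci_conn reference itself, and LE signalling frames get their own receive loop, l2cap_le_sig_channel(), instead of being funneled through l2cap_sig_channel(). That loop walks a buffer of command headers (code, ident, little-endian length) followed by payload and rejects short or ident-less commands. A self-contained sketch of the same framing walk, with a simplified header layout that is not the on-air format:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Simplified header: 1-byte code, 1-byte ident, 2-byte little-endian length. */
#define CMD_HDR_SIZE 4

static uint16_t get_le16(const uint8_t *p)
{
        return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

static void parse_sig_channel(const uint8_t *data, size_t len)
{
        while (len >= CMD_HDR_SIZE) {
                uint8_t code = data[0], ident = data[1];
                uint16_t cmd_len = get_le16(data + 2);

                data += CMD_HDR_SIZE;
                len  -= CMD_HDR_SIZE;

                /* Same sanity check as the kernel loop: stop on short or
                 * ident-less commands instead of reading past the buffer. */
                if (cmd_len > len || !ident) {
                        printf("corrupted command\n");
                        break;
                }

                printf("code 0x%02x len %u id 0x%02x\n", code, cmd_len, ident);

                data += cmd_len;
                len  -= cmd_len;
        }
}

int main(void)
{
        /* Two commands: code 0x14 with 2 payload bytes, then code 0x15 with none. */
        const uint8_t buf[] = { 0x14, 0x01, 0x02, 0x00, 0xaa, 0xbb,
                                0x15, 0x02, 0x00, 0x00 };

        parse_sig_channel(buf, sizeof(buf));
        return 0;
}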
index 36fed40c162cff5361c9dbdf9cfd5f313e47743a..0098af80b21327045c99a38771ed8c95da2544c2 100644 (file)
@@ -466,7 +466,7 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname,
 static bool l2cap_valid_mtu(struct l2cap_chan *chan, u16 mtu)
 {
        switch (chan->scid) {
-       case L2CAP_CID_LE_DATA:
+       case L2CAP_CID_ATT:
                if (mtu < L2CAP_LE_MIN_MTU)
                        return false;
                break;
@@ -630,7 +630,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
                conn = chan->conn;
 
                /*change security for LE channels */
-               if (chan->scid == L2CAP_CID_LE_DATA) {
+               if (chan->scid == L2CAP_CID_ATT) {
                        if (!conn->hcon->out) {
                                err = -EINVAL;
                                break;
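
Note on the L2CAP socket hunks above: they follow the rename, so both the MTU floor check and the LE security-change path key off chan->scid == L2CAP_CID_ATT. A tiny hedged sketch of that MTU check, with stand-in constants rather than the values from the L2CAP headers:

#include <stdbool.h>
#include <stdint.h>

/* Stand-in constants; the real ones come from the L2CAP headers. */
#define CID_ATT         0x0004
#define LE_MIN_MTU      23
#define DEFAULT_MIN_MTU 48

/* The fixed ATT channel accepts a lower MTU floor than other channels. */
static bool valid_mtu(uint16_t scid, uint16_t mtu)
{
        if (scid == CID_ATT)
                return mtu >= LE_MIN_MTU;
        return mtu >= DEFAULT_MIN_MTU;
}

int main(void)
{
        return valid_mtu(CID_ATT, 23) && !valid_mtu(0x0040, 23) ? 0 : 1;
}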
index f8ecbc70293d1d0c0b82101c4378a4e9ea8be1bd..fedc5399d4658687e7646f3768d8dd47f2017e33 100644 (file)
@@ -102,18 +102,6 @@ static const u16 mgmt_events[] = {
        MGMT_EV_PASSKEY_NOTIFY,
 };
 
-/*
- * These LE scan and inquiry parameters were chosen according to LE General
- * Discovery Procedure specification.
- */
-#define LE_SCAN_WIN                    0x12
-#define LE_SCAN_INT                    0x12
-#define LE_SCAN_TIMEOUT_LE_ONLY                msecs_to_jiffies(10240)
-#define LE_SCAN_TIMEOUT_BREDR_LE       msecs_to_jiffies(5120)
-
-#define INQUIRY_LEN_BREDR              0x08    /* TGAP(100) */
-#define INQUIRY_LEN_BREDR_LE           0x04    /* TGAP(100)/2 */
-
 #define CACHE_TIMEOUT  msecs_to_jiffies(2 * 1000)
 
 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
@@ -1748,8 +1736,6 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
 
        hci_link_keys_clear(hdev);
 
-       set_bit(HCI_LINK_KEYS, &hdev->dev_flags);
-
        if (cp->debug_keys)
                set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
        else
@@ -2633,28 +2619,72 @@ static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
        return err;
 }
 
-int mgmt_interleaved_discovery(struct hci_dev *hdev)
+static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
 {
+       struct pending_cmd *cmd;
+       u8 type;
        int err;
 
-       BT_DBG("%s", hdev->name);
+       hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
 
-       hci_dev_lock(hdev);
+       cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
+       if (!cmd)
+               return -ENOENT;
 
-       err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR_LE);
-       if (err < 0)
-               hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+       type = hdev->discovery.type;
 
-       hci_dev_unlock(hdev);
+       err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
+                          &type, sizeof(type));
+       mgmt_pending_remove(cmd);
 
        return err;
 }
 
+static void start_discovery_complete(struct hci_dev *hdev, u8 status)
+{
+       BT_DBG("status %d", status);
+
+       if (status) {
+               hci_dev_lock(hdev);
+               mgmt_start_discovery_failed(hdev, status);
+               hci_dev_unlock(hdev);
+               return;
+       }
+
+       hci_dev_lock(hdev);
+       hci_discovery_set_state(hdev, DISCOVERY_FINDING);
+       hci_dev_unlock(hdev);
+
+       switch (hdev->discovery.type) {
+       case DISCOV_TYPE_LE:
+               queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
+                                  DISCOV_LE_TIMEOUT);
+               break;
+
+       case DISCOV_TYPE_INTERLEAVED:
+               queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
+                                  DISCOV_INTERLEAVED_TIMEOUT);
+               break;
+
+       case DISCOV_TYPE_BREDR:
+               break;
+
+       default:
+               BT_ERR("Invalid discovery type %d", hdev->discovery.type);
+       }
+}
+
 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
                           void *data, u16 len)
 {
        struct mgmt_cp_start_discovery *cp = data;
        struct pending_cmd *cmd;
+       struct hci_cp_le_set_scan_param param_cp;
+       struct hci_cp_le_set_scan_enable enable_cp;
+       struct hci_cp_inquiry inq_cp;
+       struct hci_request req;
+       /* General inquiry access code (GIAC) */
+       u8 lap[3] = { 0x33, 0x8b, 0x9e };
        int err;
 
        BT_DBG("%s", hdev->name);
@@ -2687,6 +2717,8 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
 
        hdev->discovery.type = cp->type;
 
+       hci_req_init(&req, hdev);
+
        switch (hdev->discovery.type) {
        case DISCOV_TYPE_BREDR:
                if (!lmp_bredr_capable(hdev)) {
@@ -2696,10 +2728,23 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
                        goto failed;
                }
 
-               err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR);
+               if (test_bit(HCI_INQUIRY, &hdev->flags)) {
+                       err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+                                        MGMT_STATUS_BUSY);
+                       mgmt_pending_remove(cmd);
+                       goto failed;
+               }
+
+               hci_inquiry_cache_flush(hdev);
+
+               memset(&inq_cp, 0, sizeof(inq_cp));
+               memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
+               inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
+               hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
                break;
 
        case DISCOV_TYPE_LE:
+       case DISCOV_TYPE_INTERLEAVED:
                if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                        err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
                                         MGMT_STATUS_NOT_SUPPORTED);
@@ -2707,20 +2752,40 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
                        goto failed;
                }
 
-               err = hci_le_scan(hdev, LE_SCAN_ACTIVE, LE_SCAN_INT,
-                                 LE_SCAN_WIN, LE_SCAN_TIMEOUT_LE_ONLY);
-               break;
-
-       case DISCOV_TYPE_INTERLEAVED:
-               if (!lmp_host_le_capable(hdev) || !lmp_bredr_capable(hdev)) {
+               if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
+                   !lmp_bredr_capable(hdev)) {
                        err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
                                         MGMT_STATUS_NOT_SUPPORTED);
                        mgmt_pending_remove(cmd);
                        goto failed;
                }
 
-               err = hci_le_scan(hdev, LE_SCAN_ACTIVE, LE_SCAN_INT,
-                                 LE_SCAN_WIN, LE_SCAN_TIMEOUT_BREDR_LE);
+               if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags)) {
+                       err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+                                        MGMT_STATUS_REJECTED);
+                       mgmt_pending_remove(cmd);
+                       goto failed;
+               }
+
+               if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
+                       err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
+                                        MGMT_STATUS_BUSY);
+                       mgmt_pending_remove(cmd);
+                       goto failed;
+               }
+
+               memset(&param_cp, 0, sizeof(param_cp));
+               param_cp.type = LE_SCAN_ACTIVE;
+               param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
+               param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
+               hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
+                           &param_cp);
+
+               memset(&enable_cp, 0, sizeof(enable_cp));
+               enable_cp.enable = LE_SCAN_ENABLE;
+               enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
+               hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
+                           &enable_cp);
                break;
 
        default:
@@ -2730,6 +2795,7 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
                goto failed;
        }
 
+       err = hci_req_run(&req, start_discovery_complete);
        if (err < 0)
                mgmt_pending_remove(cmd);
        else
@@ -2740,6 +2806,39 @@ failed:
        return err;
 }
 
+static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
+{
+       struct pending_cmd *cmd;
+       int err;
+
+       cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
+       if (!cmd)
+               return -ENOENT;
+
+       err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
+                          &hdev->discovery.type, sizeof(hdev->discovery.type));
+       mgmt_pending_remove(cmd);
+
+       return err;
+}
+
+static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
+{
+       BT_DBG("status %d", status);
+
+       hci_dev_lock(hdev);
+
+       if (status) {
+               mgmt_stop_discovery_failed(hdev, status);
+               goto unlock;
+       }
+
+       hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+
+unlock:
+       hci_dev_unlock(hdev);
+}
+
 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
                          u16 len)
 {
@@ -2747,6 +2846,8 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
        struct pending_cmd *cmd;
        struct hci_cp_remote_name_req_cancel cp;
        struct inquiry_entry *e;
+       struct hci_request req;
+       struct hci_cp_le_set_scan_enable enable_cp;
        int err;
 
        BT_DBG("%s", hdev->name);
@@ -2773,12 +2874,20 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
                goto unlock;
        }
 
+       hci_req_init(&req, hdev);
+
        switch (hdev->discovery.state) {
        case DISCOVERY_FINDING:
-               if (test_bit(HCI_INQUIRY, &hdev->flags))
-                       err = hci_cancel_inquiry(hdev);
-               else
-                       err = hci_cancel_le_scan(hdev);
+               if (test_bit(HCI_INQUIRY, &hdev->flags)) {
+                       hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
+               } else {
+                       cancel_delayed_work(&hdev->le_scan_disable);
+
+                       memset(&enable_cp, 0, sizeof(enable_cp));
+                       enable_cp.enable = LE_SCAN_DISABLE;
+                       hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE,
+                                   sizeof(enable_cp), &enable_cp);
+               }
 
                break;
 
@@ -2796,16 +2905,22 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
                }
 
                bacpy(&cp.bdaddr, &e->data.bdaddr);
-               err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
-                                  sizeof(cp), &cp);
+               hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
+                           &cp);
 
                break;
 
        default:
                BT_DBG("unknown discovery state %u", hdev->discovery.state);
-               err = -EFAULT;
+
+               mgmt_pending_remove(cmd);
+               err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
+                                  MGMT_STATUS_FAILED, &mgmt_cp->type,
+                                  sizeof(mgmt_cp->type));
+               goto unlock;
        }
 
+       err = hci_req_run(&req, stop_discovery_complete);
        if (err < 0)
                mgmt_pending_remove(cmd);
        else
@@ -4063,6 +4178,9 @@ int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
        struct mgmt_ev_device_found *ev = (void *) buf;
        size_t ev_size;
 
+       if (!hci_discovery_active(hdev))
+               return -EPERM;
+
        /* Leave 5 bytes for a potential CoD field */
        if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
                return -EINVAL;
@@ -4114,43 +4232,6 @@ int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                          sizeof(*ev) + eir_len, NULL);
 }
 
-int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
-{
-       struct pending_cmd *cmd;
-       u8 type;
-       int err;
-
-       hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
-
-       cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
-       if (!cmd)
-               return -ENOENT;
-
-       type = hdev->discovery.type;
-
-       err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
-                          &type, sizeof(type));
-       mgmt_pending_remove(cmd);
-
-       return err;
-}
-
-int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
-{
-       struct pending_cmd *cmd;
-       int err;
-
-       cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
-       if (!cmd)
-               return -ENOENT;
-
-       err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
-                          &hdev->discovery.type, sizeof(hdev->discovery.type));
-       mgmt_pending_remove(cmd);
-
-       return err;
-}
-
 int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
 {
        struct mgmt_ev_discovering ev;
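
Note on the management-interface hunks above, which carry the main behavioural change on the Bluetooth side: start_discovery() and stop_discovery() no longer call one-shot helpers such as hci_do_inquiry() or hci_le_scan(). They build a request with hci_req_init(), queue one or more commands with hci_req_add() (inquiry, LE scan parameters, LE scan enable/disable, remote-name cancel), and submit the batch with hci_req_run() together with a completion callback; the former exported mgmt_start_discovery_failed()/mgmt_stop_discovery_failed() become static helpers called from those callbacks, and mgmt_device_found() now drops events when no discovery is active. The general shape of such a batch-plus-callback request, as a standalone sketch with invented types and names (this is not the hci_request API):

#include <stdio.h>

#define MAX_CMDS 8

/* Invented request type: a queue of opcodes run as one unit. */
struct request {
        int opcodes[MAX_CMDS];
        int n;
};

static void req_init(struct request *req)
{
        req->n = 0;
}

static void req_add(struct request *req, int opcode)
{
        if (req->n < MAX_CMDS)
                req->opcodes[req->n++] = opcode;
}

static int req_run(struct request *req, void (*complete)(int status))
{
        if (!req->n)
                return -1;      /* nothing was queued */

        /* A real controller answers asynchronously; here every command
         * "succeeds" immediately and the callback runs once at the end. */
        for (int i = 0; i < req->n; i++)
                printf("sending opcode 0x%04x\n", req->opcodes[i]);
        complete(0);
        return 0;
}

static void discovery_complete(int status)
{
        if (status)
                printf("discovery failed, status %d\n", status);
        else
                printf("discovery running\n");
}

int main(void)
{
        struct request req;

        req_init(&req);
        req_add(&req, 0x200b);          /* e.g. set LE scan parameters */
        req_add(&req, 0x200c);          /* e.g. enable LE scanning */
        return req_run(&req, discovery_complete);
}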
index 0785e95c992442e52ab2f7ce473ff65ecbdbdb9e..be7614b9ed27b7827cbae7a423e5957ec1302c25 100644 (file)
@@ -85,7 +85,7 @@ void ieee80211_aes_ccm_encrypt(struct crypto_cipher *tfm, u8 *scratch,
                        *cpos++ = *pos++ ^ e[i];
        }
 
-       for (i = 0; i < CCMP_MIC_LEN; i++)
+       for (i = 0; i < IEEE80211_CCMP_MIC_LEN; i++)
                mic[i] = b[i] ^ s_0[i];
 }
 
@@ -123,7 +123,7 @@ int ieee80211_aes_ccm_decrypt(struct crypto_cipher *tfm, u8 *scratch,
                crypto_cipher_encrypt_one(tfm, a, a);
        }
 
-       for (i = 0; i < CCMP_MIC_LEN; i++) {
+       for (i = 0; i < IEEE80211_CCMP_MIC_LEN; i++) {
                if ((mic[i] ^ s_0[i]) != a[i])
                        return -1;
        }
@@ -138,7 +138,7 @@ struct crypto_cipher *ieee80211_aes_key_setup_encrypt(const u8 key[])
 
        tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
        if (!IS_ERR(tfm))
-               crypto_cipher_setkey(tfm, key, ALG_CCMP_KEY_LEN);
+               crypto_cipher_setkey(tfm, key, WLAN_KEY_LEN_CCMP);
 
        return tfm;
 }
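
Note on the AES-CCM hunk above: it only swaps private CCMP length constants for the shared IEEE80211_CCMP_MIC_LEN / WLAN_KEY_LEN_CCMP definitions; the MIC math is untouched. The MIC is the CBC-MAC block XORed with the first keystream block s_0, and the receiver unmasks and compares. A tiny sketch of that last step, assuming the 8-byte CCMP MIC:

#include <stdint.h>

#define MIC_LEN 8       /* CCMP MIC length */

/* Transmit side: mask the CBC-MAC block b with the first keystream block s_0. */
static void ccm_make_mic(const uint8_t *b, const uint8_t *s_0, uint8_t *mic)
{
        for (int i = 0; i < MIC_LEN; i++)
                mic[i] = b[i] ^ s_0[i];
}

/* Receive side: recompute b, unmask the received MIC and compare. */
static int ccm_check_mic(const uint8_t *b, const uint8_t *s_0, const uint8_t *mic)
{
        for (int i = 0; i < MIC_LEN; i++)
                if ((mic[i] ^ s_0[i]) != b[i])
                        return -1;
        return 0;
}

int main(void)
{
        uint8_t b[MIC_LEN]   = { 1, 2, 3, 4, 5, 6, 7, 8 };
        uint8_t s_0[MIC_LEN] = { 9, 9, 9, 9, 9, 9, 9, 9 };
        uint8_t mic[MIC_LEN];

        ccm_make_mic(b, s_0, mic);
        return ccm_check_mic(b, s_0, mic);      /* 0 on success */
}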
index 4fdb306e42e0c1724f7a843b6cc77239d6b065bb..8184d121ff0904a285529ff234f75c3f15f522e5 100644 (file)
@@ -73,16 +73,19 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
                struct ieee80211_local *local = sdata->local;
 
                if (ieee80211_sdata_running(sdata)) {
+                       u32 mask = MONITOR_FLAG_COOK_FRAMES |
+                                  MONITOR_FLAG_ACTIVE;
+
                        /*
-                        * Prohibit MONITOR_FLAG_COOK_FRAMES to be
-                        * changed while the interface is up.
+                        * Prohibit MONITOR_FLAG_COOK_FRAMES and
+                        * MONITOR_FLAG_ACTIVE to be changed while the
+                        * interface is up.
                         * Else we would need to add a lot of cruft
                         * to update everything:
                         *      cooked_mntrs, monitor and all fif_* counters
                         *      reconfigure hardware
                         */
-                       if ((*flags & MONITOR_FLAG_COOK_FRAMES) !=
-                           (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES))
+                       if ((*flags & mask) != (sdata->u.mntr_flags & mask))
                                return -EBUSY;
 
                        ieee80211_adjust_monitor_flags(sdata, -1);
@@ -444,7 +447,7 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
        struct ieee80211_local *local = sdata->local;
        struct timespec uptime;
        u64 packets = 0;
-       int ac;
+       int i, ac;
 
        sinfo->generation = sdata->local->sta_generation;
 
@@ -488,6 +491,17 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
                        sinfo->signal = (s8)sta->last_signal;
                sinfo->signal_avg = (s8) -ewma_read(&sta->avg_signal);
        }
+       if (sta->chains) {
+               sinfo->filled |= STATION_INFO_CHAIN_SIGNAL |
+                                STATION_INFO_CHAIN_SIGNAL_AVG;
+
+               sinfo->chains = sta->chains;
+               for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) {
+                       sinfo->chain_signal[i] = sta->chain_signal_last[i];
+                       sinfo->chain_signal_avg[i] =
+                               (s8) -ewma_read(&sta->chain_signal_avg[i]);
+               }
+       }
 
        sta_set_rate_info_tx(sta, &sta->last_tx_rate, &sinfo->txrate);
        sta_set_rate_info_rx(sta, &sinfo->rxrate);
@@ -728,7 +742,7 @@ static void ieee80211_get_et_strings(struct wiphy *wiphy,
 
        if (sset == ETH_SS_STATS) {
                sz_sta_stats = sizeof(ieee80211_gstrings_sta_stats);
-               memcpy(data, *ieee80211_gstrings_sta_stats, sz_sta_stats);
+               memcpy(data, ieee80211_gstrings_sta_stats, sz_sta_stats);
        }
        drv_get_et_strings(sdata, sset, &(data[sz_sta_stats]));
 }
@@ -1741,6 +1755,7 @@ static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh,
        ifmsh->mesh_pp_id = setup->path_sel_proto;
        ifmsh->mesh_pm_id = setup->path_metric;
        ifmsh->user_mpm = setup->user_mpm;
+       ifmsh->mesh_auth_id = setup->auth_id;
        ifmsh->security = IEEE80211_MESH_SEC_NONE;
        if (setup->is_authenticated)
                ifmsh->security |= IEEE80211_MESH_SEC_AUTHED;
@@ -1750,6 +1765,7 @@ static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh,
        /* mcast rate setting in Mesh Node */
        memcpy(sdata->vif.bss_conf.mcast_rate, setup->mcast_rate,
                                                sizeof(setup->mcast_rate));
+       sdata->vif.bss_conf.basic_rates = setup->basic_rates;
 
        sdata->vif.bss_conf.beacon_int = setup->beacon_interval;
        sdata->vif.bss_conf.dtim_period = setup->dtim_period;
@@ -1862,6 +1878,8 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy,
        if (_chg_mesh_attr(NL80211_MESHCONF_AWAKE_WINDOW, mask))
                conf->dot11MeshAwakeWindowDuration =
                        nconf->dot11MeshAwakeWindowDuration;
+       if (_chg_mesh_attr(NL80211_MESHCONF_PLINK_TIMEOUT, mask))
+               conf->plink_timeout = nconf->plink_timeout;
        ieee80211_mbss_info_change_notify(sdata, BSS_CHANGED_BEACON);
        return 0;
 }
@@ -2312,7 +2330,7 @@ int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata,
        enum ieee80211_smps_mode old_req;
        int err;
 
-       lockdep_assert_held(&sdata->u.mgd.mtx);
+       lockdep_assert_held(&sdata->wdev.mtx);
 
        old_req = sdata->u.mgd.req_smps;
        sdata->u.mgd.req_smps = smps_mode;
@@ -2369,9 +2387,9 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
        local->dynamic_ps_forced_timeout = timeout;
 
        /* no change, but if automatic follow powersave */
-       mutex_lock(&sdata->u.mgd.mtx);
+       sdata_lock(sdata);
        __ieee80211_request_smps(sdata, sdata->u.mgd.req_smps);
-       mutex_unlock(&sdata->u.mgd.mtx);
+       sdata_unlock(sdata);
 
        if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
                ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
@@ -2809,7 +2827,8 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
                    !rcu_access_pointer(sdata->bss->beacon))
                        need_offchan = true;
                if (!ieee80211_is_action(mgmt->frame_control) ||
-                   mgmt->u.action.category == WLAN_CATEGORY_PUBLIC)
+                   mgmt->u.action.category == WLAN_CATEGORY_PUBLIC ||
+                   mgmt->u.action.category == WLAN_CATEGORY_SELF_PROTECTED)
                        break;
                rcu_read_lock();
                sta = sta_info_get(sdata, mgmt->da);
@@ -2829,6 +2848,12 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
                return -EOPNOTSUPP;
        }
 
+       /* configurations requiring offchan cannot work if no channel has been
+        * specified
+        */
+       if (need_offchan && !chan)
+               return -EINVAL;
+
        mutex_lock(&local->mtx);
 
        /* Check if the operating channel is the requested channel */
@@ -2838,10 +2863,15 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
                rcu_read_lock();
                chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
 
-               if (chanctx_conf)
-                       need_offchan = chan != chanctx_conf->def.chan;
-               else
+               if (chanctx_conf) {
+                       need_offchan = chan && (chan != chanctx_conf->def.chan);
+               } else if (!chan) {
+                       ret = -EINVAL;
+                       rcu_read_unlock();
+                       goto out_unlock;
+               } else {
                        need_offchan = true;
+               }
                rcu_read_unlock();
        }
 
@@ -2901,19 +2931,8 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
                                          u16 frame_type, bool reg)
 {
        struct ieee80211_local *local = wiphy_priv(wiphy);
-       struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
 
        switch (frame_type) {
-       case IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH:
-               if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
-                       struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
-
-                       if (reg)
-                               ifibss->auth_frame_registrations++;
-                       else
-                               ifibss->auth_frame_registrations--;
-               }
-               break;
        case IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ:
                if (reg)
                        local->probe_req_reg++;
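
Note on the cfg80211-ops hunks above, which bundle several independent changes: monitor interfaces may no longer change either MONITOR_FLAG_COOK_FRAMES or the new MONITOR_FLAG_ACTIVE while running (one mask comparison instead of a single-flag check), station info gains per-chain signal and its average, mesh setup copies auth_id, basic_rates and plink_timeout, and off-channel mgmt TX fails with -EINVAL when no channel was given. The mask comparison, checking whether any protected bit differs between the old and requested flag words, is worth spelling out; a short sketch with made-up flag values:

#include <stdio.h>
#include <stdint.h>

/* Illustrative flag bits, not the nl80211 values. */
#define FLAG_COOK_FRAMES  0x1
#define FLAG_ACTIVE       0x2
#define FLAG_OTHER        0x4

/* Nonzero if any bit covered by mask differs between old and new. */
static int protected_bits_changed(uint32_t old_flags, uint32_t new_flags,
                                  uint32_t mask)
{
        return (old_flags & mask) != (new_flags & mask);
}

int main(void)
{
        uint32_t mask = FLAG_COOK_FRAMES | FLAG_ACTIVE;

        /* Toggling an unrelated flag is fine while the interface is up... */
        printf("%d\n", protected_bits_changed(FLAG_ACTIVE,
                                              FLAG_ACTIVE | FLAG_OTHER, mask));
        /* ...but touching a protected flag is refused (-EBUSY in the kernel). */
        printf("%d\n", protected_bits_changed(FLAG_ACTIVE, FLAG_COOK_FRAMES, mask));
        return 0;
}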
index 14abcf44f974309de5495908d413eeeae2f3595e..cafe614ef93d672087fb66a1d82aaff4110cc22c 100644 (file)
@@ -228,9 +228,9 @@ static int ieee80211_set_smps(struct ieee80211_sub_if_data *sdata,
        if (sdata->vif.type != NL80211_IFTYPE_STATION)
                return -EOPNOTSUPP;
 
-       mutex_lock(&sdata->u.mgd.mtx);
+       sdata_lock(sdata);
        err = __ieee80211_request_smps(sdata, smps_mode);
-       mutex_unlock(&sdata->u.mgd.mtx);
+       sdata_unlock(sdata);
 
        return err;
 }
@@ -313,16 +313,16 @@ static ssize_t ieee80211_if_parse_tkip_mic_test(
        case NL80211_IFTYPE_STATION:
                fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
                /* BSSID SA DA */
-               mutex_lock(&sdata->u.mgd.mtx);
+               sdata_lock(sdata);
                if (!sdata->u.mgd.associated) {
-                       mutex_unlock(&sdata->u.mgd.mtx);
+                       sdata_unlock(sdata);
                        dev_kfree_skb(skb);
                        return -ENOTCONN;
                }
                memcpy(hdr->addr1, sdata->u.mgd.associated->bssid, ETH_ALEN);
                memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
                memcpy(hdr->addr3, addr, ETH_ALEN);
-               mutex_unlock(&sdata->u.mgd.mtx);
+               sdata_unlock(sdata);
                break;
        default:
                dev_kfree_skb(skb);
@@ -471,6 +471,8 @@ __IEEE80211_IF_FILE_W(tsf);
 IEEE80211_IF_FILE(peer, u.wds.remote_addr, MAC);
 
 #ifdef CONFIG_MAC80211_MESH
+IEEE80211_IF_FILE(estab_plinks, u.mesh.estab_plinks, ATOMIC);
+
 /* Mesh stats attributes */
 IEEE80211_IF_FILE(fwded_mcast, u.mesh.mshstats.fwded_mcast, DEC);
 IEEE80211_IF_FILE(fwded_unicast, u.mesh.mshstats.fwded_unicast, DEC);
@@ -480,7 +482,6 @@ IEEE80211_IF_FILE(dropped_frames_congestion,
                  u.mesh.mshstats.dropped_frames_congestion, DEC);
 IEEE80211_IF_FILE(dropped_frames_no_route,
                  u.mesh.mshstats.dropped_frames_no_route, DEC);
-IEEE80211_IF_FILE(estab_plinks, u.mesh.estab_plinks, ATOMIC);
 
 /* Mesh parameters */
 IEEE80211_IF_FILE(dot11MeshMaxRetries,
@@ -583,6 +584,7 @@ static void add_wds_files(struct ieee80211_sub_if_data *sdata)
 static void add_mesh_files(struct ieee80211_sub_if_data *sdata)
 {
        DEBUGFS_ADD_MODE(tsf, 0600);
+       DEBUGFS_ADD_MODE(estab_plinks, 0400);
 }
 
 static void add_mesh_stats(struct ieee80211_sub_if_data *sdata)
@@ -598,7 +600,6 @@ static void add_mesh_stats(struct ieee80211_sub_if_data *sdata)
        MESHSTATS_ADD(dropped_frames_ttl);
        MESHSTATS_ADD(dropped_frames_no_route);
        MESHSTATS_ADD(dropped_frames_congestion);
-       MESHSTATS_ADD(estab_plinks);
 #undef MESHSTATS_ADD
 }
 
index 169664c122e23cbe80f89016714bdcdfec24dc6e..b931c96a596fb3e92e9effa845ab10f74144dfd1 100644 (file)
@@ -146,7 +146,8 @@ static inline int drv_add_interface(struct ieee80211_local *local,
 
        if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
                    (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
-                    !(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF))))
+                    !(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF) &&
+                    !(sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE))))
                return -EINVAL;
 
        trace_drv_add_interface(local, sdata);
index af8cee06e4f37475ed17c764e55cf75c070ef9f2..f83534f6a2eec14abcb237196e4d6a233e2f8136 100644 (file)
@@ -281,13 +281,14 @@ void ieee80211_ba_session_work(struct work_struct *work)
                                sta, tid, WLAN_BACK_RECIPIENT,
                                WLAN_REASON_UNSPECIFIED, true);
 
+               spin_lock_bh(&sta->lock);
+
                tid_tx = sta->ampdu_mlme.tid_start_tx[tid];
                if (tid_tx) {
                        /*
                         * Assign it over to the normal tid_tx array
                         * where it "goes live".
                         */
-                       spin_lock_bh(&sta->lock);
 
                        sta->ampdu_mlme.tid_start_tx[tid] = NULL;
                        /* could there be a race? */
@@ -300,6 +301,7 @@ void ieee80211_ba_session_work(struct work_struct *work)
                        ieee80211_tx_ba_session_handle_start(sta, tid);
                        continue;
                }
+               spin_unlock_bh(&sta->lock);
 
                tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
                if (tid_tx && test_and_clear_bit(HT_AGG_STATE_WANT_STOP,
@@ -429,9 +431,9 @@ void ieee80211_request_smps_work(struct work_struct *work)
                container_of(work, struct ieee80211_sub_if_data,
                             u.mgd.request_smps_work);
 
-       mutex_lock(&sdata->u.mgd.mtx);
+       sdata_lock(sdata);
        __ieee80211_request_smps(sdata, sdata->u.mgd.driver_smps_mode);
-       mutex_unlock(&sdata->u.mgd.mtx);
+       sdata_unlock(sdata);
 }
 
 void ieee80211_request_smps(struct ieee80211_vif *vif,
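
Note on the BA-session hunk above: the sta->lock critical section is widened so that reading ampdu_mlme.tid_start_tx[tid] and handing the session over happen under the same lock, rather than locking only after the pointer was already read, and the SMPS helpers switch to the new sdata_lock(). A condensed sketch of the take-under-lock shape, with a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stddef.h>

struct session {
        int tid;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct session *tid_start_tx;    /* may be written by another thread */

/* Read and claim the pending session in one critical section; locking only
 * after the read would let the pointer change underneath the caller. */
static struct session *take_pending_session(void)
{
        struct session *s;

        pthread_mutex_lock(&lock);
        s = tid_start_tx;
        if (s)
                tid_start_tx = NULL;
        pthread_mutex_unlock(&lock);

        return s;
}

int main(void)
{
        struct session pending = { .tid = 3 };

        tid_start_tx = &pending;
        return take_pending_session() == &pending ? 0 : 1;
}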
index 170f9a7fa3190afee4148bb4caf0c817a53b7991..ea7b9c2c7e66db19a811244d82e907bdb79620f1 100644 (file)
@@ -54,7 +54,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
        struct beacon_data *presp;
        int frame_len;
 
-       lockdep_assert_held(&ifibss->mtx);
+       sdata_assert_lock(sdata);
 
        /* Reset own TSF to allow time synchronization work. */
        drv_reset_tsf(local, sdata);
@@ -74,14 +74,14 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
        }
 
        presp = rcu_dereference_protected(ifibss->presp,
-                                         lockdep_is_held(&ifibss->mtx));
+                                         lockdep_is_held(&sdata->wdev.mtx));
        rcu_assign_pointer(ifibss->presp, NULL);
        if (presp)
                kfree_rcu(presp, rcu_head);
 
        sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0;
 
-       cfg80211_chandef_create(&chandef, chan, ifibss->channel_type);
+       chandef = ifibss->chandef;
        if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef)) {
                chandef.width = NL80211_CHAN_WIDTH_20;
                chandef.center_freq1 = chan->center_freq;
@@ -176,6 +176,8 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 
        /* add HT capability and information IEs */
        if (chandef.width != NL80211_CHAN_WIDTH_20_NOHT &&
+           chandef.width != NL80211_CHAN_WIDTH_5 &&
+           chandef.width != NL80211_CHAN_WIDTH_10 &&
            sband->ht_cap.ht_supported) {
                pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap,
                                                sband->ht_cap.cap);
@@ -263,7 +265,7 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
        const struct cfg80211_bss_ies *ies;
        u64 tsf;
 
-       lockdep_assert_held(&sdata->u.ibss.mtx);
+       sdata_assert_lock(sdata);
 
        if (beacon_int < 10)
                beacon_int = 10;
@@ -298,8 +300,7 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
                                  tsf, false);
 }
 
-static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta,
-                                                 bool auth)
+static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta)
        __acquires(RCU)
 {
        struct ieee80211_sub_if_data *sdata = sta->sdata;
@@ -321,26 +322,19 @@ static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta,
        /* If it fails, maybe we raced another insertion? */
        if (sta_info_insert_rcu(sta))
                return sta_info_get(sdata, addr);
-       if (auth && !sdata->u.ibss.auth_frame_registrations) {
-               ibss_dbg(sdata,
-                        "TX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=1)\n",
-                        sdata->vif.addr, addr, sdata->u.ibss.bssid);
-               ieee80211_send_auth(sdata, 1, WLAN_AUTH_OPEN, 0, NULL, 0,
-                                   addr, sdata->u.ibss.bssid, NULL, 0, 0, 0);
-       }
        return sta;
 }
 
 static struct sta_info *
-ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
-                      const u8 *bssid, const u8 *addr,
-                      u32 supp_rates, bool auth)
+ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid,
+                      const u8 *addr, u32 supp_rates)
        __acquires(RCU)
 {
        struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
        struct ieee80211_local *local = sdata->local;
        struct sta_info *sta;
        struct ieee80211_chanctx_conf *chanctx_conf;
+       struct ieee80211_supported_band *sband;
        int band;
 
        /*
@@ -380,10 +374,11 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
        sta->last_rx = jiffies;
 
        /* make sure mandatory rates are always added */
+       sband = local->hw.wiphy->bands[band];
        sta->sta.supp_rates[band] = supp_rates |
-                       ieee80211_mandatory_rates(local, band);
+                       ieee80211_mandatory_rates(sband);
 
-       return ieee80211_ibss_finish_sta(sta, auth);
+       return ieee80211_ibss_finish_sta(sta);
 }
 
 static void ieee80211_rx_mgmt_deauth_ibss(struct ieee80211_sub_if_data *sdata,
@@ -405,10 +400,8 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
                                        size_t len)
 {
        u16 auth_alg, auth_transaction;
-       struct sta_info *sta;
-       u8 deauth_frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
 
-       lockdep_assert_held(&sdata->u.ibss.mtx);
+       sdata_assert_lock(sdata);
 
        if (len < 24 + 6)
                return;
@@ -423,22 +416,6 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
        if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1)
                return;
 
-       sta_info_destroy_addr(sdata, mgmt->sa);
-       sta = ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, 0, false);
-       rcu_read_unlock();
-
-       /*
-        * if we have any problem in allocating the new station, we reply with a
-        * DEAUTH frame to tell the other end that we had a problem
-        */
-       if (!sta) {
-               ieee80211_send_deauth_disassoc(sdata, sdata->u.ibss.bssid,
-                                              IEEE80211_STYPE_DEAUTH,
-                                              WLAN_REASON_UNSPECIFIED, true,
-                                              deauth_frame_buf);
-               return;
-       }
-
        /*
         * IEEE 802.11 standard does not require authentication in IBSS
         * networks and most implementations do not seem to use it.
@@ -492,7 +469,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
                                prev_rates = sta->sta.supp_rates[band];
                                /* make sure mandatory rates are always added */
                                sta->sta.supp_rates[band] = supp_rates |
-                                       ieee80211_mandatory_rates(local, band);
+                                       ieee80211_mandatory_rates(sband);
 
                                if (sta->sta.supp_rates[band] != prev_rates) {
                                        ibss_dbg(sdata,
@@ -504,7 +481,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
                        } else {
                                rcu_read_unlock();
                                sta = ieee80211_ibss_add_sta(sdata, mgmt->bssid,
-                                               mgmt->sa, supp_rates, true);
+                                               mgmt->sa, supp_rates);
                        }
                }
 
@@ -512,7 +489,9 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
                        set_sta_flag(sta, WLAN_STA_WME);
 
                if (sta && elems->ht_operation && elems->ht_cap_elem &&
-                   sdata->u.ibss.channel_type != NL80211_CHAN_NO_HT) {
+                   sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_20_NOHT &&
+                   sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_5 &&
+                   sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_10) {
                        /* we both use HT */
                        struct ieee80211_ht_cap htcap_ie;
                        struct cfg80211_chan_def chandef;
@@ -527,8 +506,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
                         * fall back to HT20 if we don't use or use
                         * the other extension channel
                         */
-                       if (cfg80211_get_chandef_type(&chandef) !=
-                                               sdata->u.ibss.channel_type)
+                       if (chandef.center_freq1 !=
+                           sdata->u.ibss.chandef.center_freq1)
                                htcap_ie.cap_info &=
                                        cpu_to_le16(~IEEE80211_HT_CAP_SUP_WIDTH_20_40);
 
@@ -567,7 +546,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
 
        /* different channel */
        if (sdata->u.ibss.fixed_channel &&
-           sdata->u.ibss.channel != cbss->channel)
+           sdata->u.ibss.chandef.chan != cbss->channel)
                goto put_bss;
 
        /* different SSID */
@@ -608,7 +587,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
                ieee80211_sta_join_ibss(sdata, bss);
                supp_rates = ieee80211_sta_get_rates(local, elems, band, NULL);
                ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa,
-                                      supp_rates, true);
+                                      supp_rates);
                rcu_read_unlock();
        }
 
@@ -624,6 +603,7 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_local *local = sdata->local;
        struct sta_info *sta;
        struct ieee80211_chanctx_conf *chanctx_conf;
+       struct ieee80211_supported_band *sband;
        int band;
 
        /*
@@ -658,8 +638,9 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
        sta->last_rx = jiffies;
 
        /* make sure mandatory rates are always added */
+       sband = local->hw.wiphy->bands[band];
        sta->sta.supp_rates[band] = supp_rates |
-                       ieee80211_mandatory_rates(local, band);
+                       ieee80211_mandatory_rates(sband);
 
        spin_lock(&ifibss->incomplete_lock);
        list_add(&sta->list, &ifibss->incomplete_stations);
@@ -673,7 +654,7 @@ static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata)
        int active = 0;
        struct sta_info *sta;
 
-       lockdep_assert_held(&sdata->u.ibss.mtx);
+       sdata_assert_lock(sdata);
 
        rcu_read_lock();
 
@@ -699,7 +680,7 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
 
-       lockdep_assert_held(&ifibss->mtx);
+       sdata_assert_lock(sdata);
 
        mod_timer(&ifibss->timer,
                  round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL));
@@ -730,7 +711,7 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
        u16 capability;
        int i;
 
-       lockdep_assert_held(&ifibss->mtx);
+       sdata_assert_lock(sdata);
 
        if (ifibss->fixed_bssid) {
                memcpy(bssid, ifibss->bssid, ETH_ALEN);
@@ -755,7 +736,7 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
                sdata->drop_unencrypted = 0;
 
        __ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int,
-                                 ifibss->channel, ifibss->basic_rates,
+                                 ifibss->chandef.chan, ifibss->basic_rates,
                                  capability, 0, true);
 }
 
@@ -773,7 +754,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
        int active_ibss;
        u16 capability;
 
-       lockdep_assert_held(&ifibss->mtx);
+       sdata_assert_lock(sdata);
 
        active_ibss = ieee80211_sta_active_ibss(sdata);
        ibss_dbg(sdata, "sta_find_ibss (active_ibss=%d)\n", active_ibss);
@@ -787,7 +768,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
        if (ifibss->fixed_bssid)
                bssid = ifibss->bssid;
        if (ifibss->fixed_channel)
-               chan = ifibss->channel;
+               chan = ifibss->chandef.chan;
        if (!is_zero_ether_addr(ifibss->bssid))
                bssid = ifibss->bssid;
        cbss = cfg80211_get_bss(local->hw.wiphy, chan, bssid,
@@ -843,10 +824,10 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
        struct beacon_data *presp;
        u8 *pos, *end;
 
-       lockdep_assert_held(&ifibss->mtx);
+       sdata_assert_lock(sdata);
 
        presp = rcu_dereference_protected(ifibss->presp,
-                                         lockdep_is_held(&ifibss->mtx));
+                                         lockdep_is_held(&sdata->wdev.mtx));
 
        if (ifibss->state != IEEE80211_IBSS_MLME_JOINED ||
            len < 24 + 2 || !presp)
@@ -930,7 +911,7 @@ void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
        mgmt = (struct ieee80211_mgmt *) skb->data;
        fc = le16_to_cpu(mgmt->frame_control);
 
-       mutex_lock(&sdata->u.ibss.mtx);
+       sdata_lock(sdata);
 
        if (!sdata->u.ibss.ssid_len)
                goto mgmt_out; /* not ready to merge yet */
@@ -953,7 +934,7 @@ void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
        }
 
  mgmt_out:
-       mutex_unlock(&sdata->u.ibss.mtx);
+       sdata_unlock(sdata);
 }
 
 void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata)
@@ -961,7 +942,7 @@ void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata)
        struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
        struct sta_info *sta;
 
-       mutex_lock(&ifibss->mtx);
+       sdata_lock(sdata);
 
        /*
         * Work could be scheduled after scan or similar
@@ -978,7 +959,7 @@ void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata)
                list_del(&sta->list);
                spin_unlock_bh(&ifibss->incomplete_lock);
 
-               ieee80211_ibss_finish_sta(sta, true);
+               ieee80211_ibss_finish_sta(sta);
                rcu_read_unlock();
                spin_lock_bh(&ifibss->incomplete_lock);
        }
@@ -997,7 +978,7 @@ void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata)
        }
 
  out:
-       mutex_unlock(&ifibss->mtx);
+       sdata_unlock(sdata);
 }
 
 static void ieee80211_ibss_timer(unsigned long data)
@@ -1014,7 +995,6 @@ void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata)
 
        setup_timer(&ifibss->timer, ieee80211_ibss_timer,
                    (unsigned long) sdata);
-       mutex_init(&ifibss->mtx);
        INIT_LIST_HEAD(&ifibss->incomplete_stations);
        spin_lock_init(&ifibss->incomplete_lock);
 }
@@ -1041,8 +1021,6 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
 {
        u32 changed = 0;
 
-       mutex_lock(&sdata->u.ibss.mtx);
-
        if (params->bssid) {
                memcpy(sdata->u.ibss.bssid, params->bssid, ETH_ALEN);
                sdata->u.ibss.fixed_bssid = true;
@@ -1057,9 +1035,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
 
        sdata->vif.bss_conf.beacon_int = params->beacon_interval;
 
-       sdata->u.ibss.channel = params->chandef.chan;
-       sdata->u.ibss.channel_type =
-               cfg80211_get_chandef_type(&params->chandef);
+       sdata->u.ibss.chandef = params->chandef;
        sdata->u.ibss.fixed_channel = params->channel_fixed;
 
        if (params->ie) {
@@ -1075,8 +1051,6 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
        memcpy(sdata->u.ibss.ssid, params->ssid, params->ssid_len);
        sdata->u.ibss.ssid_len = params->ssid_len;
 
-       mutex_unlock(&sdata->u.ibss.mtx);
-
        /*
         * 802.11n-2009 9.13.3.1: In an IBSS, the HT Protection field is
         * reserved, but an HT STA shall protect HT transmissions as though
@@ -1112,8 +1086,6 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
        struct sta_info *sta;
        struct beacon_data *presp;
 
-       mutex_lock(&sdata->u.ibss.mtx);
-
        active_ibss = ieee80211_sta_active_ibss(sdata);
 
        if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) {
@@ -1122,7 +1094,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
                if (ifibss->privacy)
                        capability |= WLAN_CAPABILITY_PRIVACY;
 
-               cbss = cfg80211_get_bss(local->hw.wiphy, ifibss->channel,
+               cbss = cfg80211_get_bss(local->hw.wiphy, ifibss->chandef.chan,
                                        ifibss->bssid, ifibss->ssid,
                                        ifibss->ssid_len, WLAN_CAPABILITY_IBSS |
                                        WLAN_CAPABILITY_PRIVACY,
@@ -1157,7 +1129,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
        /* remove beacon */
        kfree(sdata->u.ibss.ie);
        presp = rcu_dereference_protected(ifibss->presp,
-                                         lockdep_is_held(&sdata->u.ibss.mtx));
+                                         lockdep_is_held(&sdata->wdev.mtx));
        RCU_INIT_POINTER(sdata->u.ibss.presp, NULL);
        sdata->vif.bss_conf.ibss_joined = false;
        sdata->vif.bss_conf.ibss_creator = false;
@@ -1173,7 +1145,5 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
 
        del_timer_sync(&sdata->u.ibss.timer);
 
-       mutex_unlock(&sdata->u.ibss.mtx);
-
        return 0;
 }
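
Note on the IBSS hunks above: the old (channel, channel_type) pair is folded into a single cfg80211_chan_def, the per-IBSS mutex and the auth-frame registration counting are dropped in favour of the common sdata lock, and the supported-band pointer is passed straight into ieee80211_mandatory_rates(). Keeping one chandef means width and centre-frequency checks read off a single struct; a hedged sketch of that shape, with field names loosely modelled on cfg80211 and invented values:

#include <stdio.h>

enum chan_width { WIDTH_20_NOHT, WIDTH_20, WIDTH_40, WIDTH_5, WIDTH_10 };

/* One struct instead of a loosely coupled (channel, channel_type) pair. */
struct chan_def {
        int center_freq1;               /* MHz */
        enum chan_width width;
};

/* HT IEs are only emitted for regular-width channels, as in the hunks above. */
static int chandef_allows_ht(const struct chan_def *def)
{
        return def->width != WIDTH_20_NOHT &&
               def->width != WIDTH_5 &&
               def->width != WIDTH_10;
}

int main(void)
{
        struct chan_def ibss = { .center_freq1 = 2437, .width = WIDTH_40 };

        printf("HT allowed on %d MHz: %s\n", ibss.center_freq1,
               chandef_allows_ht(&ibss) ? "yes" : "no");
        return 0;
}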
index 9ca8e3278cc0398abb8464c40954e2a9cf73a5cb..8412a303993a647eed61e6b0794e91e49b27dd29 100644 (file)
@@ -94,6 +94,7 @@ struct ieee80211_bss {
 #define IEEE80211_MAX_SUPP_RATES 32
        u8 supp_rates[IEEE80211_MAX_SUPP_RATES];
        size_t supp_rates_len;
+       struct ieee80211_rate *beacon_rate;
 
        /*
         * During association, we save an ERP value from a probe response so
@@ -366,7 +367,7 @@ struct ieee80211_mgd_assoc_data {
        u8 ssid_len;
        u8 supp_rates_len;
        bool wmm, uapsd;
-       bool have_beacon, need_beacon;
+       bool need_beacon;
        bool synced;
        bool timeout_started;
 
@@ -394,7 +395,6 @@ struct ieee80211_if_managed {
        bool nullfunc_failed;
        bool connection_loss;
 
-       struct mutex mtx;
        struct cfg80211_bss *associated;
        struct ieee80211_mgd_auth_data *auth_data;
        struct ieee80211_mgd_assoc_data *assoc_data;
@@ -405,6 +405,7 @@ struct ieee80211_if_managed {
 
        bool powersave; /* powersave requested for this iface */
        bool broken_ap; /* AP is broken -- turn off powersave */
+       bool have_beacon;
        u8 dtim_period;
        enum ieee80211_smps_mode req_smps, /* requested smps mode */
                                 driver_smps_mode; /* smps mode request */
@@ -488,8 +489,6 @@ struct ieee80211_if_managed {
 struct ieee80211_if_ibss {
        struct timer_list timer;
 
-       struct mutex mtx;
-
        unsigned long last_scan_completed;
 
        u32 basic_rates;
@@ -499,14 +498,12 @@ struct ieee80211_if_ibss {
        bool privacy;
 
        bool control_port;
-       unsigned int auth_frame_registrations;
 
        u8 bssid[ETH_ALEN] __aligned(2);
        u8 ssid[IEEE80211_MAX_SSID_LEN];
        u8 ssid_len, ie_len;
        u8 *ie;
-       struct ieee80211_channel *channel;
-       enum nl80211_channel_type channel_type;
+       struct cfg80211_chan_def chandef;
 
        unsigned long ibss_join_req;
        /* probe response/beacon for IBSS */
@@ -545,6 +542,7 @@ struct ieee80211_if_mesh {
        struct timer_list mesh_path_root_timer;
 
        unsigned long wrkq_flags;
+       unsigned long mbss_changed;
 
        u8 mesh_id[IEEE80211_MAX_MESH_ID_LEN];
        size_t mesh_id_len;
@@ -580,8 +578,6 @@ struct ieee80211_if_mesh {
        bool accepting_plinks;
        int num_gates;
        struct beacon_data __rcu *beacon;
-       /* just protects beacon updates for now */
-       struct mutex mtx;
        const u8 *ie;
        u8 ie_len;
        enum {
@@ -778,6 +774,26 @@ struct ieee80211_sub_if_data *vif_to_sdata(struct ieee80211_vif *p)
        return container_of(p, struct ieee80211_sub_if_data, vif);
 }
 
+static inline void sdata_lock(struct ieee80211_sub_if_data *sdata)
+       __acquires(&sdata->wdev.mtx)
+{
+       mutex_lock(&sdata->wdev.mtx);
+       __acquire(&sdata->wdev.mtx);
+}
+
+static inline void sdata_unlock(struct ieee80211_sub_if_data *sdata)
+       __releases(&sdata->wdev.mtx)
+{
+       mutex_unlock(&sdata->wdev.mtx);
+       __release(&sdata->wdev.mtx);
+}
+
+static inline void
+sdata_assert_lock(struct ieee80211_sub_if_data *sdata)
+{
+       lockdep_assert_held(&sdata->wdev.mtx);
+}
+
 static inline enum ieee80211_band
 ieee80211_get_sdata_band(struct ieee80211_sub_if_data *sdata)
 {
@@ -1507,9 +1523,6 @@ static inline void ieee802_11_parse_elems(const u8 *start, size_t len,
        ieee802_11_parse_elems_crc(start, len, action, elems, 0, 0);
 }
 
-u32 ieee80211_mandatory_rates(struct ieee80211_local *local,
-                             enum ieee80211_band band);
-
 void ieee80211_dynamic_ps_enable_work(struct work_struct *work);
 void ieee80211_dynamic_ps_disable_work(struct work_struct *work);
 void ieee80211_dynamic_ps_timer(unsigned long data);
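
Note on the header hunks above: the per-mode mutexes (u.mgd.mtx, u.ibss.mtx, the mesh beacon mutex) are removed and sdata_lock()/sdata_unlock()/sdata_assert_lock() are added as thin wrappers around the wireless_dev mutex, annotated with __acquires/__releases so sparse can track lock balance. The wrapper-plus-assert idiom in general terms, sketched with pthreads and invented names:

#include <assert.h>
#include <pthread.h>

struct iface {
        pthread_mutex_t mtx;
        int locked;             /* crude stand-in for lockdep's bookkeeping */
};

static void iface_lock(struct iface *i)
{
        pthread_mutex_lock(&i->mtx);
        i->locked = 1;
}

static void iface_unlock(struct iface *i)
{
        i->locked = 0;
        pthread_mutex_unlock(&i->mtx);
}

/* Callers that need the lock held assert it, as lockdep_assert_held() does. */
static void iface_assert_locked(const struct iface *i)
{
        assert(i->locked);
}

int main(void)
{
        struct iface i = { .mtx = PTHREAD_MUTEX_INITIALIZER };

        iface_lock(&i);
        iface_assert_locked(&i);
        iface_unlock(&i);
        return 0;
}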
index 98d20c0f6fed8829c1881e16b274c2bf97c589c4..a2a8250e2f845d6441f60e177357900d3d411155 100644 (file)
@@ -159,7 +159,8 @@ static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
        return 0;
 }
 
-static int ieee80211_verify_mac(struct ieee80211_sub_if_data *sdata, u8 *addr)
+static int ieee80211_verify_mac(struct ieee80211_sub_if_data *sdata, u8 *addr,
+                               bool check_dup)
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_sub_if_data *iter;
@@ -180,13 +181,16 @@ static int ieee80211_verify_mac(struct ieee80211_sub_if_data *sdata, u8 *addr)
                ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) |
                ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8);
 
+       if (!check_dup)
+               return ret;
 
        mutex_lock(&local->iflist_mtx);
        list_for_each_entry(iter, &local->interfaces, list) {
                if (iter == sdata)
                        continue;
 
-               if (iter->vif.type == NL80211_IFTYPE_MONITOR)
+               if (iter->vif.type == NL80211_IFTYPE_MONITOR &&
+                   !(iter->u.mntr_flags & MONITOR_FLAG_ACTIVE))
                        continue;
 
                m = iter->vif.addr;
@@ -208,12 +212,17 @@ static int ieee80211_change_mac(struct net_device *dev, void *addr)
 {
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
        struct sockaddr *sa = addr;
+       bool check_dup = true;
        int ret;
 
        if (ieee80211_sdata_running(sdata))
                return -EBUSY;
 
-       ret = ieee80211_verify_mac(sdata, sa->sa_data);
+       if (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
+           !(sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE))
+               check_dup = false;
+
+       ret = ieee80211_verify_mac(sdata, sa->sa_data, check_dup);
        if (ret)
                return ret;
 
@@ -545,7 +554,11 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
                        break;
                }
 
-               if (local->monitors == 0 && local->open_count == 0) {
+               if (sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE) {
+                       res = drv_add_interface(local, sdata);
+                       if (res)
+                               goto err_stop;
+               } else if (local->monitors == 0 && local->open_count == 0) {
                        res = ieee80211_add_virtual_monitor(local);
                        if (res)
                                goto err_stop;
@@ -923,7 +936,11 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
                mutex_lock(&local->mtx);
                ieee80211_recalc_idle(local);
                mutex_unlock(&local->mtx);
-               break;
+
+               if (!(sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE))
+                       break;
+
+               /* fall through */
        default:
                if (going_down)
                        drv_remove_interface(local, sdata);
@@ -1072,7 +1089,7 @@ static const struct net_device_ops ieee80211_monitorif_ops = {
        .ndo_start_xmit         = ieee80211_monitor_start_xmit,
        .ndo_set_rx_mode        = ieee80211_set_multicast_list,
        .ndo_change_mtu         = ieee80211_change_mtu,
-       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_set_mac_address    = ieee80211_change_mac,
        .ndo_select_queue       = ieee80211_monitor_select_queue,
 };
 
index 67059b88fea5f28619c97ad2ca8c150840045055..e39cc91d0cf125c584464ef73be6df5e97729e3d 100644
@@ -335,12 +335,12 @@ struct ieee80211_key *ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
        switch (cipher) {
        case WLAN_CIPHER_SUITE_WEP40:
        case WLAN_CIPHER_SUITE_WEP104:
-               key->conf.iv_len = WEP_IV_LEN;
-               key->conf.icv_len = WEP_ICV_LEN;
+               key->conf.iv_len = IEEE80211_WEP_IV_LEN;
+               key->conf.icv_len = IEEE80211_WEP_ICV_LEN;
                break;
        case WLAN_CIPHER_SUITE_TKIP:
-               key->conf.iv_len = TKIP_IV_LEN;
-               key->conf.icv_len = TKIP_ICV_LEN;
+               key->conf.iv_len = IEEE80211_TKIP_IV_LEN;
+               key->conf.icv_len = IEEE80211_TKIP_ICV_LEN;
                if (seq) {
                        for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
                                key->u.tkip.rx[i].iv32 =
@@ -352,13 +352,13 @@ struct ieee80211_key *ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
                spin_lock_init(&key->u.tkip.txlock);
                break;
        case WLAN_CIPHER_SUITE_CCMP:
-               key->conf.iv_len = CCMP_HDR_LEN;
-               key->conf.icv_len = CCMP_MIC_LEN;
+               key->conf.iv_len = IEEE80211_CCMP_HDR_LEN;
+               key->conf.icv_len = IEEE80211_CCMP_MIC_LEN;
                if (seq) {
                        for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++)
-                               for (j = 0; j < CCMP_PN_LEN; j++)
+                               for (j = 0; j < IEEE80211_CCMP_PN_LEN; j++)
                                        key->u.ccmp.rx_pn[i][j] =
-                                               seq[CCMP_PN_LEN - j - 1];
+                                               seq[IEEE80211_CCMP_PN_LEN - j - 1];
                }
                /*
                 * Initialize AES key state here as an optimization so that
@@ -375,9 +375,9 @@ struct ieee80211_key *ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
                key->conf.iv_len = 0;
                key->conf.icv_len = sizeof(struct ieee80211_mmie);
                if (seq)
-                       for (j = 0; j < CMAC_PN_LEN; j++)
+                       for (j = 0; j < IEEE80211_CMAC_PN_LEN; j++)
                                key->u.aes_cmac.rx_pn[j] =
-                                       seq[CMAC_PN_LEN - j - 1];
+                                       seq[IEEE80211_CMAC_PN_LEN - j - 1];
                /*
                 * Initialize AES key state here as an optimization so that
                 * it does not need to be initialized for every packet.
@@ -740,13 +740,13 @@ void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf,
                        pn = key->u.ccmp.rx_pn[IEEE80211_NUM_TIDS];
                else
                        pn = key->u.ccmp.rx_pn[tid];
-               memcpy(seq->ccmp.pn, pn, CCMP_PN_LEN);
+               memcpy(seq->ccmp.pn, pn, IEEE80211_CCMP_PN_LEN);
                break;
        case WLAN_CIPHER_SUITE_AES_CMAC:
                if (WARN_ON(tid != 0))
                        return;
                pn = key->u.aes_cmac.rx_pn;
-               memcpy(seq->aes_cmac.pn, pn, CMAC_PN_LEN);
+               memcpy(seq->aes_cmac.pn, pn, IEEE80211_CMAC_PN_LEN);
                break;
        }
 }
index e8de3e6d7804be1e9562531be2d28372532897ea..036d57e76a5e37d4156d5abedcd479e0aa0ffb6e 100644
 #define NUM_DEFAULT_KEYS 4
 #define NUM_DEFAULT_MGMT_KEYS 2
 
-#define WEP_IV_LEN             4
-#define WEP_ICV_LEN            4
-#define ALG_CCMP_KEY_LEN       16
-#define CCMP_HDR_LEN           8
-#define CCMP_MIC_LEN           8
-#define CCMP_TK_LEN            16
-#define CCMP_PN_LEN            6
-#define TKIP_IV_LEN            8
-#define TKIP_ICV_LEN           4
-#define CMAC_PN_LEN            6
-
 struct ieee80211_local;
 struct ieee80211_sub_if_data;
 struct sta_info;
@@ -93,13 +82,13 @@ struct ieee80211_key {
                         * frames and the last counter is used with Robust
                         * Management frames.
                         */
-                       u8 rx_pn[IEEE80211_NUM_TIDS + 1][CCMP_PN_LEN];
+                       u8 rx_pn[IEEE80211_NUM_TIDS + 1][IEEE80211_CCMP_PN_LEN];
                        struct crypto_cipher *tfm;
                        u32 replays; /* dot11RSNAStatsCCMPReplays */
                } ccmp;
                struct {
                        atomic64_t tx_pn;
-                       u8 rx_pn[CMAC_PN_LEN];
+                       u8 rx_pn[IEEE80211_CMAC_PN_LEN];
                        struct crypto_cipher *tfm;
                        u32 replays; /* dot11RSNAStatsCMACReplays */
                        u32 icverrors; /* dot11RSNAStatsCMACICVErrors */
index 8a7bfc47d5778fd7bbc3f561ecf483996c2041e1..626c83c042d78b74df144c92bf83f0970214e46a 100644
@@ -331,7 +331,7 @@ static int ieee80211_ifa_changed(struct notifier_block *nb,
                return NOTIFY_DONE;
 
        ifmgd = &sdata->u.mgd;
-       mutex_lock(&ifmgd->mtx);
+       sdata_lock(sdata);
 
        /* Copy the addresses to the bss_conf list */
        ifa = idev->ifa_list;
@@ -349,7 +349,7 @@ static int ieee80211_ifa_changed(struct notifier_block *nb,
                ieee80211_bss_info_change_notify(sdata,
                                                 BSS_CHANGED_ARP_FILTER);
 
-       mutex_unlock(&ifmgd->mtx);
+       sdata_unlock(sdata);
 
        return NOTIFY_DONE;
 }
@@ -686,8 +686,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
                return -EINVAL;
 
 #ifdef CONFIG_PM
-       if ((hw->wiphy->wowlan.flags || hw->wiphy->wowlan.n_patterns) &&
-           (!local->ops->suspend || !local->ops->resume))
+       if (hw->wiphy->wowlan && (!local->ops->suspend || !local->ops->resume))
                return -EINVAL;
 #endif
 
index 6952760881c8f25fafe15efd03fe4dbd3eb443a1..447f41bbe744d79b570eb4c85dd4cdb8ffb72e70 100644
@@ -271,8 +271,7 @@ int mesh_add_meshconf_ie(struct ieee80211_sub_if_data *sdata,
        *pos++ = ifmsh->mesh_auth_id;
        /* Mesh Formation Info - number of neighbors */
        neighbors = atomic_read(&ifmsh->estab_plinks);
-       /* Number of neighbor mesh STAs or 15 whichever is smaller */
-       neighbors = (neighbors > 15) ? 15 : neighbors;
+       neighbors = min_t(int, neighbors, IEEE80211_MAX_MESH_PEERINGS);
        *pos++ = neighbors << 1;
        /* Mesh capability */
        *pos = IEEE80211_MESHCONF_CAPAB_FORWARDING;
@@ -417,7 +416,9 @@ int mesh_add_ht_cap_ie(struct ieee80211_sub_if_data *sdata,
 
        sband = local->hw.wiphy->bands[band];
        if (!sband->ht_cap.ht_supported ||
-           sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT)
+           sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
+           sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
+           sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
                return 0;
 
        if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_cap))
@@ -573,7 +574,7 @@ static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata)
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        u32 changed;
 
-       ieee80211_sta_expire(sdata, IEEE80211_MESH_PEER_INACTIVITY_LIMIT);
+       ieee80211_sta_expire(sdata, ifmsh->mshcfg.plink_timeout * HZ);
        mesh_path_expire(sdata);
 
        changed = mesh_accept_plinks_update(sdata);
@@ -697,38 +698,38 @@ out_free:
 }
 
 static int
-ieee80211_mesh_rebuild_beacon(struct ieee80211_if_mesh *ifmsh)
+ieee80211_mesh_rebuild_beacon(struct ieee80211_sub_if_data *sdata)
 {
        struct beacon_data *old_bcn;
        int ret;
 
-       mutex_lock(&ifmsh->mtx);
-
-       old_bcn = rcu_dereference_protected(ifmsh->beacon,
-                                           lockdep_is_held(&ifmsh->mtx));
-       ret = ieee80211_mesh_build_beacon(ifmsh);
+       old_bcn = rcu_dereference_protected(sdata->u.mesh.beacon,
+                                           lockdep_is_held(&sdata->wdev.mtx));
+       ret = ieee80211_mesh_build_beacon(&sdata->u.mesh);
        if (ret)
                /* just reuse old beacon */
-               goto out;
+               return ret;
 
        if (old_bcn)
                kfree_rcu(old_bcn, rcu_head);
-out:
-       mutex_unlock(&ifmsh->mtx);
-       return ret;
+       return 0;
 }
 
 void ieee80211_mbss_info_change_notify(struct ieee80211_sub_if_data *sdata,
                                       u32 changed)
 {
-       if (sdata->vif.bss_conf.enable_beacon &&
-           (changed & (BSS_CHANGED_BEACON |
-                       BSS_CHANGED_HT |
-                       BSS_CHANGED_BASIC_RATES |
-                       BSS_CHANGED_BEACON_INT)))
-               if (ieee80211_mesh_rebuild_beacon(&sdata->u.mesh))
-                       return;
-       ieee80211_bss_info_change_notify(sdata, changed);
+       struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+       unsigned long bits = changed;
+       u32 bit;
+
+       if (!bits)
+               return;
+
+       /* if we race with running work, worst case this work becomes a noop */
+       for_each_set_bit(bit, &bits, sizeof(changed) * BITS_PER_BYTE)
+               set_bit(bit, &ifmsh->mbss_changed);
+       set_bit(MESH_WORK_MBSS_CHANGED, &ifmsh->wrkq_flags);
+       ieee80211_queue_work(&sdata->local->hw, &sdata->work);
 }
 
 int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
@@ -740,7 +741,6 @@ int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
                      BSS_CHANGED_HT |
                      BSS_CHANGED_BASIC_RATES |
                      BSS_CHANGED_BEACON_INT;
-       enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
 
        local->fif_other_bss++;
        /* mesh ifaces must set allmulti to forward mcast traffic */
@@ -748,7 +748,6 @@ int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
        ieee80211_configure_filter(local);
 
        ifmsh->mesh_cc_id = 0;  /* Disabled */
-       ifmsh->mesh_auth_id = 0;        /* Disabled */
        /* register sync ops from extensible synchronization framework */
        ifmsh->sync_ops = ieee80211_mesh_sync_ops_get(ifmsh->mesh_sp_id);
        ifmsh->adjusting_tbtt = false;
@@ -759,8 +758,6 @@ int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
        sdata->vif.bss_conf.ht_operation_mode =
                                ifmsh->mshcfg.ht_opmode;
        sdata->vif.bss_conf.enable_beacon = true;
-       sdata->vif.bss_conf.basic_rates =
-               ieee80211_mandatory_rates(local, band);
 
        changed |= ieee80211_mps_local_status_update(sdata);
 
@@ -788,12 +785,10 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
        sdata->vif.bss_conf.enable_beacon = false;
        clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state);
        ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
-       mutex_lock(&ifmsh->mtx);
        bcn = rcu_dereference_protected(ifmsh->beacon,
-                                       lockdep_is_held(&ifmsh->mtx));
+                                       lockdep_is_held(&sdata->wdev.mtx));
        rcu_assign_pointer(ifmsh->beacon, NULL);
        kfree_rcu(bcn, rcu_head);
-       mutex_unlock(&ifmsh->mtx);
 
        /* flush STAs and mpaths on this iface */
        sta_info_flush(sdata);
@@ -806,14 +801,10 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
        del_timer_sync(&sdata->u.mesh.housekeeping_timer);
        del_timer_sync(&sdata->u.mesh.mesh_path_root_timer);
        del_timer_sync(&sdata->u.mesh.mesh_path_timer);
-       /*
-        * If the timer fired while we waited for it, it will have
-        * requeued the work. Now the work will be running again
-        * but will not rearm the timer again because it checks
-        * whether the interface is running, which, at this point,
-        * it no longer is.
-        */
-       cancel_work_sync(&sdata->work);
+
+       /* clear any mesh work (for next join) we may have accrued */
+       ifmsh->wrkq_flags = 0;
+       ifmsh->mbss_changed = 0;
 
        local->fif_other_bss--;
        atomic_dec(&local->iff_allmultis);
@@ -954,6 +945,12 @@ void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_mgmt *mgmt;
        u16 stype;
 
+       sdata_lock(sdata);
+
+       /* mesh already went down */
+       if (!sdata->wdev.mesh_id_len)
+               goto out;
+
        rx_status = IEEE80211_SKB_RXCB(skb);
        mgmt = (struct ieee80211_mgmt *) skb->data;
        stype = le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE;
@@ -971,12 +968,42 @@ void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
                ieee80211_mesh_rx_mgmt_action(sdata, mgmt, skb->len, rx_status);
                break;
        }
+out:
+       sdata_unlock(sdata);
+}
+
+static void mesh_bss_info_changed(struct ieee80211_sub_if_data *sdata)
+{
+       struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+       u32 bit, changed = 0;
+
+       for_each_set_bit(bit, &ifmsh->mbss_changed,
+                        sizeof(changed) * BITS_PER_BYTE) {
+               clear_bit(bit, &ifmsh->mbss_changed);
+               changed |= BIT(bit);
+       }
+
+       if (sdata->vif.bss_conf.enable_beacon &&
+           (changed & (BSS_CHANGED_BEACON |
+                       BSS_CHANGED_HT |
+                       BSS_CHANGED_BASIC_RATES |
+                       BSS_CHANGED_BEACON_INT)))
+               if (ieee80211_mesh_rebuild_beacon(sdata))
+                       return;
+
+       ieee80211_bss_info_change_notify(sdata, changed);
 }
 
 void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 
+       sdata_lock(sdata);
+
+       /* mesh already went down */
+       if (!sdata->wdev.mesh_id_len)
+               goto out;
+
        if (ifmsh->preq_queue_len &&
            time_after(jiffies,
                       ifmsh->last_preq + msecs_to_jiffies(ifmsh->mshcfg.dot11MeshHWMPpreqMinInterval)))
@@ -996,6 +1023,11 @@ void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata)
 
        if (test_and_clear_bit(MESH_WORK_DRIFT_ADJUST, &ifmsh->wrkq_flags))
                mesh_sync_adjust_tbtt(sdata);
+
+       if (test_and_clear_bit(MESH_WORK_MBSS_CHANGED, &ifmsh->wrkq_flags))
+               mesh_bss_info_changed(sdata);
+out:
+       sdata_unlock(sdata);
 }
 
 void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local)
@@ -1041,7 +1073,6 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
        spin_lock_init(&ifmsh->mesh_preq_queue_lock);
        spin_lock_init(&ifmsh->sync_offset_lock);
        RCU_INIT_POINTER(ifmsh->beacon, NULL);
-       mutex_init(&ifmsh->mtx);
 
        sdata->vif.bss_conf.bssid = zero_addr;
 }
index da158774eebb69eac8abf0a23472f766532cf00a..2bc7fd2f787dd9546ac616e8c55340e1957eafb1 100644
@@ -58,6 +58,7 @@ enum mesh_path_flags {
  * @MESH_WORK_ROOT: the mesh root station needs to send a frame
  * @MESH_WORK_DRIFT_ADJUST: time to compensate for clock drift relative to other
  * mesh nodes
+ * @MESH_WORK_MBSS_CHANGED: rebuild beacon and notify driver of BSS changes
  */
 enum mesh_deferred_task_flags {
        MESH_WORK_HOUSEKEEPING,
@@ -65,6 +66,7 @@ enum mesh_deferred_task_flags {
        MESH_WORK_GROW_MPP_TABLE,
        MESH_WORK_ROOT,
        MESH_WORK_DRIFT_ADJUST,
+       MESH_WORK_MBSS_CHANGED,
 };
 
 /**
@@ -188,7 +190,6 @@ struct mesh_rmc {
        u32 idx_mask;
 };
 
-#define IEEE80211_MESH_PEER_INACTIVITY_LIMIT (1800 * HZ)
 #define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ)
 
 #define MESH_PATH_EXPIRE (600 * HZ)
@@ -324,14 +325,14 @@ static inline
 u32 mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata)
 {
        atomic_inc(&sdata->u.mesh.estab_plinks);
-       return mesh_accept_plinks_update(sdata);
+       return mesh_accept_plinks_update(sdata) | BSS_CHANGED_BEACON;
 }
 
 static inline
 u32 mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata)
 {
        atomic_dec(&sdata->u.mesh.estab_plinks);
-       return mesh_accept_plinks_update(sdata);
+       return mesh_accept_plinks_update(sdata) | BSS_CHANGED_BEACON;
 }
 
 static inline int mesh_plink_free_count(struct ieee80211_sub_if_data *sdata)
index 09bebed99416d1f848dd95f98cf488de20a36fdf..02c05fa15c203b9a51dc576579f7455979fe0376 100644
@@ -154,8 +154,14 @@ static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
        u16 ht_opmode;
        bool non_ht_sta = false, ht20_sta = false;
 
-       if (sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT)
+       switch (sdata->vif.bss_conf.chandef.width) {
+       case NL80211_CHAN_WIDTH_20_NOHT:
+       case NL80211_CHAN_WIDTH_5:
+       case NL80211_CHAN_WIDTH_10:
                return 0;
+       default:
+               break;
+       }
 
        rcu_read_lock();
        list_for_each_entry_rcu(sta, &local->sta_list, list) {
index 741448b308257c3a6f01c3d09ca36d7ddaa31ca3..ae31968d42d3b855eebe05dcf0e660b4d367ff8f 100644
@@ -90,41 +90,6 @@ MODULE_PARM_DESC(probe_wait_ms,
  */
 #define IEEE80211_SIGNAL_AVE_MIN_COUNT 4
 
-/*
- * All cfg80211 functions have to be called outside a locked
- * section so that they can acquire a lock themselves... This
- * is much simpler than queuing up things in cfg80211, but we
- * do need some indirection for that here.
- */
-enum rx_mgmt_action {
-       /* no action required */
-       RX_MGMT_NONE,
-
-       /* caller must call cfg80211_send_deauth() */
-       RX_MGMT_CFG80211_DEAUTH,
-
-       /* caller must call cfg80211_send_disassoc() */
-       RX_MGMT_CFG80211_DISASSOC,
-
-       /* caller must call cfg80211_send_rx_auth() */
-       RX_MGMT_CFG80211_RX_AUTH,
-
-       /* caller must call cfg80211_send_rx_assoc() */
-       RX_MGMT_CFG80211_RX_ASSOC,
-
-       /* caller must call cfg80211_send_assoc_timeout() */
-       RX_MGMT_CFG80211_ASSOC_TIMEOUT,
-
-       /* used when a processed beacon causes a deauth */
-       RX_MGMT_CFG80211_TX_DEAUTH,
-};
-
-/* utils */
-static inline void ASSERT_MGD_MTX(struct ieee80211_if_managed *ifmgd)
-{
-       lockdep_assert_held(&ifmgd->mtx);
-}
-
 /*
  * We can have multiple work items (and connection probing)
  * scheduling this timer, but we need to take care to only
@@ -135,13 +100,14 @@ static inline void ASSERT_MGD_MTX(struct ieee80211_if_managed *ifmgd)
  * has happened -- the work that runs from this timer will
  * do that.
  */
-static void run_again(struct ieee80211_if_managed *ifmgd, unsigned long timeout)
+static void run_again(struct ieee80211_sub_if_data *sdata,
+                     unsigned long timeout)
 {
-       ASSERT_MGD_MTX(ifmgd);
+       sdata_assert_lock(sdata);
 
-       if (!timer_pending(&ifmgd->timer) ||
-           time_before(timeout, ifmgd->timer.expires))
-               mod_timer(&ifmgd->timer, timeout);
+       if (!timer_pending(&sdata->u.mgd.timer) ||
+           time_before(timeout, sdata->u.mgd.timer.expires))
+               mod_timer(&sdata->u.mgd.timer, timeout);
 }
 
 void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata)
@@ -224,6 +190,12 @@ static u32 chandef_downgrade(struct cfg80211_chan_def *c)
                c->width = NL80211_CHAN_WIDTH_20_NOHT;
                ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
                break;
+       case NL80211_CHAN_WIDTH_5:
+       case NL80211_CHAN_WIDTH_10:
+               WARN_ON_ONCE(1);
+               /* keep c->width */
+               ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
+               break;
        }
 
        WARN_ON_ONCE(!cfg80211_chandef_valid(c));
@@ -652,7 +624,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
        struct ieee80211_channel *chan;
        u32 rates = 0;
 
-       lockdep_assert_held(&ifmgd->mtx);
+       sdata_assert_lock(sdata);
 
        rcu_read_lock();
        chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
@@ -914,6 +886,10 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
 
        IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
                                        IEEE80211_TX_INTFL_OFFCHAN_TX_OK;
+
+       if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
+               IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
+
        if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
                            IEEE80211_STA_CONNECTION_POLL))
                IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_USE_MINRATE;
@@ -962,7 +938,7 @@ static void ieee80211_chswitch_work(struct work_struct *work)
        if (!ieee80211_sdata_running(sdata))
                return;
 
-       mutex_lock(&ifmgd->mtx);
+       sdata_lock(sdata);
        if (!ifmgd->associated)
                goto out;
 
@@ -985,7 +961,7 @@ static void ieee80211_chswitch_work(struct work_struct *work)
                                        IEEE80211_QUEUE_STOP_REASON_CSA);
  out:
        ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED;
-       mutex_unlock(&ifmgd->mtx);
+       sdata_unlock(sdata);
 }
 
 void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success)
@@ -1036,7 +1012,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
        const struct ieee80211_ht_operation *ht_oper;
        int secondary_channel_offset = -1;
 
-       ASSERT_MGD_MTX(ifmgd);
+       sdata_assert_lock(sdata);
 
        if (!cbss)
                return;
@@ -1390,6 +1366,9 @@ static bool ieee80211_powersave_allowed(struct ieee80211_sub_if_data *sdata)
                          IEEE80211_STA_CONNECTION_POLL))
                return false;
 
+       if (!mgd->have_beacon)
+               return false;
+
        rcu_read_lock();
        sta = sta_info_get(sdata, mgd->bssid);
        if (sta)
@@ -1798,7 +1777,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
 
        ieee80211_led_assoc(local, 1);
 
-       if (sdata->u.mgd.assoc_data->have_beacon) {
+       if (sdata->u.mgd.have_beacon) {
                /*
                 * If the AP is buggy we may get here with no DTIM period
                 * known, so assume it's 1 which is the only safe assumption
@@ -1806,8 +1785,10 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
                 * probably just won't work at all.
                 */
                bss_conf->dtim_period = sdata->u.mgd.dtim_period ?: 1;
-               bss_info_changed |= BSS_CHANGED_DTIM_PERIOD;
+               bss_conf->beacon_rate = bss->beacon_rate;
+               bss_info_changed |= BSS_CHANGED_BEACON_INFO;
        } else {
+               bss_conf->beacon_rate = NULL;
                bss_conf->dtim_period = 0;
        }
 
@@ -1842,7 +1823,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_local *local = sdata->local;
        u32 changed = 0;
 
-       ASSERT_MGD_MTX(ifmgd);
+       sdata_assert_lock(sdata);
 
        if (WARN_ON_ONCE(tx && !frame_buf))
                return;
@@ -1930,6 +1911,9 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
        del_timer_sync(&sdata->u.mgd.chswitch_timer);
 
        sdata->vif.bss_conf.dtim_period = 0;
+       sdata->vif.bss_conf.beacon_rate = NULL;
+
+       ifmgd->have_beacon = false;
 
        ifmgd->flags = 0;
        ieee80211_vif_release_channel(sdata);
@@ -2051,7 +2035,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
        }
 
        ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms);
-       run_again(ifmgd, ifmgd->probe_timeout);
+       run_again(sdata, ifmgd->probe_timeout);
        if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
                ieee80211_flush_queues(sdata->local, sdata);
 }
@@ -2065,7 +2049,7 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
        if (!ieee80211_sdata_running(sdata))
                return;
 
-       mutex_lock(&ifmgd->mtx);
+       sdata_lock(sdata);
 
        if (!ifmgd->associated)
                goto out;
@@ -2119,7 +2103,7 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
        ifmgd->probe_send_count = 0;
        ieee80211_mgd_probe_ap_send(sdata);
  out:
-       mutex_unlock(&ifmgd->mtx);
+       sdata_unlock(sdata);
 }
 
 struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
@@ -2135,7 +2119,7 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
        if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
                return NULL;
 
-       ASSERT_MGD_MTX(ifmgd);
+       sdata_assert_lock(sdata);
 
        if (ifmgd->associated)
                cbss = ifmgd->associated;
@@ -2168,9 +2152,9 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
 
-       mutex_lock(&ifmgd->mtx);
+       sdata_lock(sdata);
        if (!ifmgd->associated) {
-               mutex_unlock(&ifmgd->mtx);
+               sdata_unlock(sdata);
                return;
        }
 
@@ -2181,13 +2165,10 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
        ieee80211_wake_queues_by_reason(&sdata->local->hw,
                                        IEEE80211_MAX_QUEUE_MAP,
                                        IEEE80211_QUEUE_STOP_REASON_CSA);
-       mutex_unlock(&ifmgd->mtx);
 
-       /*
-        * must be outside lock due to cfg80211,
-        * but that's not a problem.
-        */
-       cfg80211_send_deauth(sdata->dev, frame_buf, IEEE80211_DEAUTH_FRAME_LEN);
+       cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
+                             IEEE80211_DEAUTH_FRAME_LEN);
+       sdata_unlock(sdata);
 }
 
 static void ieee80211_beacon_connection_loss_work(struct work_struct *work)
@@ -2254,7 +2235,7 @@ static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata,
 {
        struct ieee80211_mgd_auth_data *auth_data = sdata->u.mgd.auth_data;
 
-       lockdep_assert_held(&sdata->u.mgd.mtx);
+       sdata_assert_lock(sdata);
 
        if (!assoc) {
                sta_info_destroy_addr(sdata, auth_data->bss->bssid);
@@ -2295,27 +2276,26 @@ static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
                            auth_data->key_idx, tx_flags);
 }
 
-static enum rx_mgmt_action __must_check
-ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
-                      struct ieee80211_mgmt *mgmt, size_t len)
+static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
+                                  struct ieee80211_mgmt *mgmt, size_t len)
 {
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        u8 bssid[ETH_ALEN];
        u16 auth_alg, auth_transaction, status_code;
        struct sta_info *sta;
 
-       lockdep_assert_held(&ifmgd->mtx);
+       sdata_assert_lock(sdata);
 
        if (len < 24 + 6)
-               return RX_MGMT_NONE;
+               return;
 
        if (!ifmgd->auth_data || ifmgd->auth_data->done)
-               return RX_MGMT_NONE;
+               return;
 
        memcpy(bssid, ifmgd->auth_data->bss->bssid, ETH_ALEN);
 
        if (!ether_addr_equal(bssid, mgmt->bssid))
-               return RX_MGMT_NONE;
+               return;
 
        auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
        auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
@@ -2327,14 +2307,15 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
                           mgmt->sa, auth_alg, ifmgd->auth_data->algorithm,
                           auth_transaction,
                           ifmgd->auth_data->expected_transaction);
-               return RX_MGMT_NONE;
+               return;
        }
 
        if (status_code != WLAN_STATUS_SUCCESS) {
                sdata_info(sdata, "%pM denied authentication (status %d)\n",
                           mgmt->sa, status_code);
                ieee80211_destroy_auth_data(sdata, false);
-               return RX_MGMT_CFG80211_RX_AUTH;
+               cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len);
+               return;
        }
 
        switch (ifmgd->auth_data->algorithm) {
@@ -2347,20 +2328,20 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
                if (ifmgd->auth_data->expected_transaction != 4) {
                        ieee80211_auth_challenge(sdata, mgmt, len);
                        /* need another frame */
-                       return RX_MGMT_NONE;
+                       return;
                }
                break;
        default:
                WARN_ONCE(1, "invalid auth alg %d",
                          ifmgd->auth_data->algorithm);
-               return RX_MGMT_NONE;
+               return;
        }
 
        sdata_info(sdata, "authenticated\n");
        ifmgd->auth_data->done = true;
        ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC;
        ifmgd->auth_data->timeout_started = true;
-       run_again(ifmgd, ifmgd->auth_data->timeout);
+       run_again(sdata, ifmgd->auth_data->timeout);
 
        if (ifmgd->auth_data->algorithm == WLAN_AUTH_SAE &&
            ifmgd->auth_data->expected_transaction != 2) {
@@ -2368,7 +2349,8 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
                 * Report auth frame to user space for processing since another
                 * round of Authentication frames is still needed.
                 */
-               return RX_MGMT_CFG80211_RX_AUTH;
+               cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len);
+               return;
        }
 
        /* move station state to auth */
@@ -2384,30 +2366,29 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
        }
        mutex_unlock(&sdata->local->sta_mtx);
 
-       return RX_MGMT_CFG80211_RX_AUTH;
+       cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len);
+       return;
  out_err:
        mutex_unlock(&sdata->local->sta_mtx);
        /* ignore frame -- wait for timeout */
-       return RX_MGMT_NONE;
 }
 
 
-static enum rx_mgmt_action __must_check
-ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
-                        struct ieee80211_mgmt *mgmt, size_t len)
+static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
+                                    struct ieee80211_mgmt *mgmt, size_t len)
 {
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        const u8 *bssid = NULL;
        u16 reason_code;
 
-       lockdep_assert_held(&ifmgd->mtx);
+       sdata_assert_lock(sdata);
 
        if (len < 24 + 2)
-               return RX_MGMT_NONE;
+               return;
 
        if (!ifmgd->associated ||
            !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid))
-               return RX_MGMT_NONE;
+               return;
 
        bssid = ifmgd->associated->bssid;
 
@@ -2418,25 +2399,24 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
 
        ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
 
-       return RX_MGMT_CFG80211_DEAUTH;
+       cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len);
 }
 
 
-static enum rx_mgmt_action __must_check
-ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
-                          struct ieee80211_mgmt *mgmt, size_t len)
+static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
+                                      struct ieee80211_mgmt *mgmt, size_t len)
 {
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        u16 reason_code;
 
-       lockdep_assert_held(&ifmgd->mtx);
+       sdata_assert_lock(sdata);
 
        if (len < 24 + 2)
-               return RX_MGMT_NONE;
+               return;
 
        if (!ifmgd->associated ||
            !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid))
-               return RX_MGMT_NONE;
+               return;
 
        reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
 
@@ -2445,7 +2425,7 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
 
        ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
 
-       return RX_MGMT_CFG80211_DISASSOC;
+       cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len);
 }
 
 static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
@@ -2495,7 +2475,7 @@ static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata,
 {
        struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data;
 
-       lockdep_assert_held(&sdata->u.mgd.mtx);
+       sdata_assert_lock(sdata);
 
        if (!assoc) {
                sta_info_destroy_addr(sdata, assoc_data->bss->bssid);
@@ -2749,10 +2729,9 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
        return ret;
 }
 
-static enum rx_mgmt_action __must_check
-ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
-                            struct ieee80211_mgmt *mgmt, size_t len,
-                            struct cfg80211_bss **bss)
+static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
+                                        struct ieee80211_mgmt *mgmt,
+                                        size_t len)
 {
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data;
@@ -2760,13 +2739,14 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
        struct ieee802_11_elems elems;
        u8 *pos;
        bool reassoc;
+       struct cfg80211_bss *bss;
 
-       lockdep_assert_held(&ifmgd->mtx);
+       sdata_assert_lock(sdata);
 
        if (!assoc_data)
-               return RX_MGMT_NONE;
+               return;
        if (!ether_addr_equal(assoc_data->bss->bssid, mgmt->bssid))
-               return RX_MGMT_NONE;
+               return;
 
        /*
         * AssocResp and ReassocResp have identical structure, so process both
@@ -2774,7 +2754,7 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
         */
 
        if (len < 24 + 6)
-               return RX_MGMT_NONE;
+               return;
 
        reassoc = ieee80211_is_reassoc_req(mgmt->frame_control);
        capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
@@ -2801,22 +2781,22 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
                assoc_data->timeout = jiffies + msecs_to_jiffies(ms);
                assoc_data->timeout_started = true;
                if (ms > IEEE80211_ASSOC_TIMEOUT)
-                       run_again(ifmgd, assoc_data->timeout);
-               return RX_MGMT_NONE;
+                       run_again(sdata, assoc_data->timeout);
+               return;
        }
 
-       *bss = assoc_data->bss;
+       bss = assoc_data->bss;
 
        if (status_code != WLAN_STATUS_SUCCESS) {
                sdata_info(sdata, "%pM denied association (code=%d)\n",
                           mgmt->sa, status_code);
                ieee80211_destroy_assoc_data(sdata, false);
        } else {
-               if (!ieee80211_assoc_success(sdata, *bss, mgmt, len)) {
+               if (!ieee80211_assoc_success(sdata, bss, mgmt, len)) {
                        /* oops -- internal error -- send timeout for now */
                        ieee80211_destroy_assoc_data(sdata, false);
-                       cfg80211_put_bss(sdata->local->hw.wiphy, *bss);
-                       return RX_MGMT_CFG80211_ASSOC_TIMEOUT;
+                       cfg80211_assoc_timeout(sdata->dev, bss);
+                       return;
                }
                sdata_info(sdata, "associated\n");
 
@@ -2828,7 +2808,7 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
                ieee80211_destroy_assoc_data(sdata, true);
        }
 
-       return RX_MGMT_CFG80211_RX_ASSOC;
+       cfg80211_rx_assoc_resp(sdata->dev, bss, (u8 *)mgmt, len);
 }
 
 static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
@@ -2840,23 +2820,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
        int freq;
        struct ieee80211_bss *bss;
        struct ieee80211_channel *channel;
-       bool need_ps = false;
-
-       lockdep_assert_held(&sdata->u.mgd.mtx);
 
-       if ((sdata->u.mgd.associated &&
-            ether_addr_equal(mgmt->bssid, sdata->u.mgd.associated->bssid)) ||
-           (sdata->u.mgd.assoc_data &&
-            ether_addr_equal(mgmt->bssid,
-                             sdata->u.mgd.assoc_data->bss->bssid))) {
-               /* not previously set so we may need to recalc */
-               need_ps = sdata->u.mgd.associated && !sdata->u.mgd.dtim_period;
-
-               if (elems->tim && !elems->parse_error) {
-                       const struct ieee80211_tim_ie *tim_ie = elems->tim;
-                       sdata->u.mgd.dtim_period = tim_ie->dtim_period;
-               }
-       }
+       sdata_assert_lock(sdata);
 
        if (elems->ds_params)
                freq = ieee80211_channel_to_frequency(elems->ds_params[0],
@@ -2871,19 +2836,15 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
 
        bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, elems,
                                        channel);
-       if (bss)
+       if (bss) {
                ieee80211_rx_bss_put(local, bss);
+               sdata->vif.bss_conf.beacon_rate = bss->beacon_rate;
+       }
 
        if (!sdata->u.mgd.associated ||
            !ether_addr_equal(mgmt->bssid, sdata->u.mgd.associated->bssid))
                return;
 
-       if (need_ps) {
-               mutex_lock(&local->iflist_mtx);
-               ieee80211_recalc_ps(local, -1);
-               mutex_unlock(&local->iflist_mtx);
-       }
-
        ieee80211_sta_process_chanswitch(sdata, rx_status->mactime,
                                         elems, true);
 
@@ -2901,7 +2862,7 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
 
        ifmgd = &sdata->u.mgd;
 
-       ASSERT_MGD_MTX(ifmgd);
+       sdata_assert_lock(sdata);
 
        if (!ether_addr_equal(mgmt->da, sdata->vif.addr))
                return; /* ignore ProbeResp to foreign address */
@@ -2926,7 +2887,7 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
                ifmgd->auth_data->tries = 0;
                ifmgd->auth_data->timeout = jiffies;
                ifmgd->auth_data->timeout_started = true;
-               run_again(ifmgd, ifmgd->auth_data->timeout);
+               run_again(sdata, ifmgd->auth_data->timeout);
        }
 }
 
@@ -2951,10 +2912,9 @@ static const u64 care_about_ies =
        (1ULL << WLAN_EID_HT_CAPABILITY) |
        (1ULL << WLAN_EID_HT_OPERATION);
 
-static enum rx_mgmt_action
-ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
-                        struct ieee80211_mgmt *mgmt, size_t len,
-                        u8 *deauth_buf, struct ieee80211_rx_status *rx_status)
+static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
+                                    struct ieee80211_mgmt *mgmt, size_t len,
+                                    struct ieee80211_rx_status *rx_status)
 {
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
@@ -2969,24 +2929,25 @@ ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
        u8 erp_value = 0;
        u32 ncrc;
        u8 *bssid;
+       u8 deauth_buf[IEEE80211_DEAUTH_FRAME_LEN];
 
-       lockdep_assert_held(&ifmgd->mtx);
+       sdata_assert_lock(sdata);
 
        /* Process beacon from the current BSS */
        baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt;
        if (baselen > len)
-               return RX_MGMT_NONE;
+               return;
 
        rcu_read_lock();
        chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
        if (!chanctx_conf) {
                rcu_read_unlock();
-               return RX_MGMT_NONE;
+               return;
        }
 
        if (rx_status->freq != chanctx_conf->def.chan->center_freq) {
                rcu_read_unlock();
-               return RX_MGMT_NONE;
+               return;
        }
        chan = chanctx_conf->def.chan;
        rcu_read_unlock();
@@ -2997,7 +2958,11 @@ ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
                                       len - baselen, false, &elems);
 
                ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems);
-               ifmgd->assoc_data->have_beacon = true;
+               if (elems.tim && !elems.parse_error) {
+                       const struct ieee80211_tim_ie *tim_ie = elems.tim;
+                       ifmgd->dtim_period = tim_ie->dtim_period;
+               }
+               ifmgd->have_beacon = true;
                ifmgd->assoc_data->need_beacon = false;
                if (local->hw.flags & IEEE80211_HW_TIMING_BEACON_ONLY) {
                        sdata->vif.bss_conf.sync_tsf =
@@ -3013,13 +2978,13 @@ ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
                /* continue assoc process */
                ifmgd->assoc_data->timeout = jiffies;
                ifmgd->assoc_data->timeout_started = true;
-               run_again(ifmgd, ifmgd->assoc_data->timeout);
-               return RX_MGMT_NONE;
+               run_again(sdata, ifmgd->assoc_data->timeout);
+               return;
        }
 
        if (!ifmgd->associated ||
            !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid))
-               return RX_MGMT_NONE;
+               return;
        bssid = ifmgd->associated->bssid;
 
        /* Track average RSSI from the Beacon frames of the current AP */
@@ -3165,7 +3130,7 @@ ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
        }
 
        if (ncrc == ifmgd->beacon_crc && ifmgd->beacon_crc_valid)
-               return RX_MGMT_NONE;
+               return;
        ifmgd->beacon_crc = ncrc;
        ifmgd->beacon_crc_valid = true;
 
@@ -3179,7 +3144,7 @@ ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
         * If we haven't had a beacon before, tell the driver about the
         * DTIM period (and beacon timing if desired) now.
         */
-       if (!bss_conf->dtim_period) {
+       if (!ifmgd->have_beacon) {
                /* a few bogus AP send dtim_period = 0 or no TIM IE */
                if (elems.tim)
                        bss_conf->dtim_period = elems.tim->dtim_period ?: 1;
@@ -3198,7 +3163,14 @@ ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
                                sdata->vif.bss_conf.sync_dtim_count = 0;
                }
 
-               changed |= BSS_CHANGED_DTIM_PERIOD;
+               changed |= BSS_CHANGED_BEACON_INFO;
+               ifmgd->have_beacon = true;
+
+               mutex_lock(&local->iflist_mtx);
+               ieee80211_recalc_ps(local, -1);
+               mutex_unlock(&local->iflist_mtx);
+
+               ieee80211_recalc_ps_vif(sdata);
        }
 
        if (elems.erp_info) {
@@ -3220,7 +3192,9 @@ ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
                ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
                                       WLAN_REASON_DEAUTH_LEAVING,
                                       true, deauth_buf);
-               return RX_MGMT_CFG80211_TX_DEAUTH;
+               cfg80211_tx_mlme_mgmt(sdata->dev, deauth_buf,
+                                     sizeof(deauth_buf));
+               return;
        }
 
        if (sta && elems.opmode_notif)
@@ -3237,19 +3211,13 @@ ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
                                                       elems.pwr_constr_elem);
 
        ieee80211_bss_info_change_notify(sdata, changed);
-
-       return RX_MGMT_NONE;
 }
 
 void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
                                  struct sk_buff *skb)
 {
-       struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        struct ieee80211_rx_status *rx_status;
        struct ieee80211_mgmt *mgmt;
-       struct cfg80211_bss *bss = NULL;
-       enum rx_mgmt_action rma = RX_MGMT_NONE;
-       u8 deauth_buf[IEEE80211_DEAUTH_FRAME_LEN];
        u16 fc;
        struct ieee802_11_elems elems;
        int ies_len;
@@ -3258,28 +3226,27 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
        mgmt = (struct ieee80211_mgmt *) skb->data;
        fc = le16_to_cpu(mgmt->frame_control);
 
-       mutex_lock(&ifmgd->mtx);
+       sdata_lock(sdata);
 
        switch (fc & IEEE80211_FCTL_STYPE) {
        case IEEE80211_STYPE_BEACON:
-               rma = ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len,
-                                              deauth_buf, rx_status);
+               ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len, rx_status);
                break;
        case IEEE80211_STYPE_PROBE_RESP:
                ieee80211_rx_mgmt_probe_resp(sdata, skb);
                break;
        case IEEE80211_STYPE_AUTH:
-               rma = ieee80211_rx_mgmt_auth(sdata, mgmt, skb->len);
+               ieee80211_rx_mgmt_auth(sdata, mgmt, skb->len);
                break;
        case IEEE80211_STYPE_DEAUTH:
-               rma = ieee80211_rx_mgmt_deauth(sdata, mgmt, skb->len);
+               ieee80211_rx_mgmt_deauth(sdata, mgmt, skb->len);
                break;
        case IEEE80211_STYPE_DISASSOC:
-               rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len);
+               ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len);
                break;
        case IEEE80211_STYPE_ASSOC_RESP:
        case IEEE80211_STYPE_REASSOC_RESP:
-               rma = ieee80211_rx_mgmt_assoc_resp(sdata, mgmt, skb->len, &bss);
+               ieee80211_rx_mgmt_assoc_resp(sdata, mgmt, skb->len);
                break;
        case IEEE80211_STYPE_ACTION:
                if (mgmt->u.action.category == WLAN_CATEGORY_SPECTRUM_MGMT) {
@@ -3325,34 +3292,7 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
                }
                break;
        }
-       mutex_unlock(&ifmgd->mtx);
-
-       switch (rma) {
-       case RX_MGMT_NONE:
-               /* no action */
-               break;
-       case RX_MGMT_CFG80211_DEAUTH:
-               cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
-               break;
-       case RX_MGMT_CFG80211_DISASSOC:
-               cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len);
-               break;
-       case RX_MGMT_CFG80211_RX_AUTH:
-               cfg80211_send_rx_auth(sdata->dev, (u8 *)mgmt, skb->len);
-               break;
-       case RX_MGMT_CFG80211_RX_ASSOC:
-               cfg80211_send_rx_assoc(sdata->dev, bss, (u8 *)mgmt, skb->len);
-               break;
-       case RX_MGMT_CFG80211_ASSOC_TIMEOUT:
-               cfg80211_send_assoc_timeout(sdata->dev, mgmt->bssid);
-               break;
-       case RX_MGMT_CFG80211_TX_DEAUTH:
-               cfg80211_send_deauth(sdata->dev, deauth_buf,
-                                    sizeof(deauth_buf));
-               break;
-       default:
-               WARN(1, "unexpected: %d", rma);
-       }
+       sdata_unlock(sdata);
 }
 
 static void ieee80211_sta_timer(unsigned long data)
@@ -3366,20 +3306,13 @@ static void ieee80211_sta_timer(unsigned long data)
 static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
                                          u8 *bssid, u8 reason, bool tx)
 {
-       struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
 
        ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason,
                               tx, frame_buf);
-       mutex_unlock(&ifmgd->mtx);
 
-       /*
-        * must be outside lock due to cfg80211,
-        * but that's not a problem.
-        */
-       cfg80211_send_deauth(sdata->dev, frame_buf, IEEE80211_DEAUTH_FRAME_LEN);
-
-       mutex_lock(&ifmgd->mtx);
+       cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
+                             IEEE80211_DEAUTH_FRAME_LEN);
 }
 
 static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
@@ -3389,7 +3322,7 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
        struct ieee80211_mgd_auth_data *auth_data = ifmgd->auth_data;
        u32 tx_flags = 0;
 
-       lockdep_assert_held(&ifmgd->mtx);
+       sdata_assert_lock(sdata);
 
        if (WARN_ON_ONCE(!auth_data))
                return -EINVAL;
@@ -3462,7 +3395,7 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
        if (tx_flags == 0) {
                auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
                ifmgd->auth_data->timeout_started = true;
-               run_again(ifmgd, auth_data->timeout);
+               run_again(sdata, auth_data->timeout);
        } else {
                auth_data->timeout_started = false;
        }
@@ -3475,7 +3408,7 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
        struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data;
        struct ieee80211_local *local = sdata->local;
 
-       lockdep_assert_held(&sdata->u.mgd.mtx);
+       sdata_assert_lock(sdata);
 
        assoc_data->tries++;
        if (assoc_data->tries > IEEE80211_ASSOC_MAX_TRIES) {
@@ -3499,7 +3432,7 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
        if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) {
                assoc_data->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
                assoc_data->timeout_started = true;
-               run_again(&sdata->u.mgd, assoc_data->timeout);
+               run_again(sdata, assoc_data->timeout);
        } else {
                assoc_data->timeout_started = false;
        }
@@ -3524,7 +3457,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 
-       mutex_lock(&ifmgd->mtx);
+       sdata_lock(sdata);
 
        if (ifmgd->status_received) {
                __le16 fc = ifmgd->status_fc;
@@ -3536,7 +3469,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
                        if (status_acked) {
                                ifmgd->auth_data->timeout =
                                        jiffies + IEEE80211_AUTH_TIMEOUT_SHORT;
-                               run_again(ifmgd, ifmgd->auth_data->timeout);
+                               run_again(sdata, ifmgd->auth_data->timeout);
                        } else {
                                ifmgd->auth_data->timeout = jiffies - 1;
                        }
@@ -3547,7 +3480,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
                        if (status_acked) {
                                ifmgd->assoc_data->timeout =
                                        jiffies + IEEE80211_ASSOC_TIMEOUT_SHORT;
-                               run_again(ifmgd, ifmgd->assoc_data->timeout);
+                               run_again(sdata, ifmgd->assoc_data->timeout);
                        } else {
                                ifmgd->assoc_data->timeout = jiffies - 1;
                        }
@@ -3570,30 +3503,22 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
 
                        ieee80211_destroy_auth_data(sdata, false);
 
-                       mutex_unlock(&ifmgd->mtx);
-                       cfg80211_send_auth_timeout(sdata->dev, bssid);
-                       mutex_lock(&ifmgd->mtx);
+                       cfg80211_auth_timeout(sdata->dev, bssid);
                }
        } else if (ifmgd->auth_data && ifmgd->auth_data->timeout_started)
-               run_again(ifmgd, ifmgd->auth_data->timeout);
+               run_again(sdata, ifmgd->auth_data->timeout);
 
        if (ifmgd->assoc_data && ifmgd->assoc_data->timeout_started &&
            time_after(jiffies, ifmgd->assoc_data->timeout)) {
-               if ((ifmgd->assoc_data->need_beacon &&
-                    !ifmgd->assoc_data->have_beacon) ||
+               if ((ifmgd->assoc_data->need_beacon && !ifmgd->have_beacon) ||
                    ieee80211_do_assoc(sdata)) {
-                       u8 bssid[ETH_ALEN];
-
-                       memcpy(bssid, ifmgd->assoc_data->bss->bssid, ETH_ALEN);
+                       struct cfg80211_bss *bss = ifmgd->assoc_data->bss;
 
                        ieee80211_destroy_assoc_data(sdata, false);
-
-                       mutex_unlock(&ifmgd->mtx);
-                       cfg80211_send_assoc_timeout(sdata->dev, bssid);
-                       mutex_lock(&ifmgd->mtx);
+                       cfg80211_assoc_timeout(sdata->dev, bss);
                }
        } else if (ifmgd->assoc_data && ifmgd->assoc_data->timeout_started)
-               run_again(ifmgd, ifmgd->assoc_data->timeout);
+               run_again(sdata, ifmgd->assoc_data->timeout);
 
        if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
                            IEEE80211_STA_CONNECTION_POLL) &&
@@ -3627,7 +3552,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
                                        false);
                        }
                } else if (time_is_after_jiffies(ifmgd->probe_timeout))
-                       run_again(ifmgd, ifmgd->probe_timeout);
+                       run_again(sdata, ifmgd->probe_timeout);
                else if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) {
                        mlme_dbg(sdata,
                                 "Failed to send nullfunc to AP %pM after %dms, disconnecting\n",
@@ -3656,7 +3581,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
                }
        }
 
-       mutex_unlock(&ifmgd->mtx);
+       sdata_unlock(sdata);
 }
 
 static void ieee80211_sta_bcn_mon_timer(unsigned long data)
@@ -3717,9 +3642,9 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 
-       mutex_lock(&ifmgd->mtx);
+       sdata_lock(sdata);
        if (!ifmgd->associated) {
-               mutex_unlock(&ifmgd->mtx);
+               sdata_unlock(sdata);
                return;
        }
 
@@ -3730,10 +3655,10 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
                                              ifmgd->associated->bssid,
                                              WLAN_REASON_UNSPECIFIED,
                                              true);
-               mutex_unlock(&ifmgd->mtx);
+               sdata_unlock(sdata);
                return;
        }
-       mutex_unlock(&ifmgd->mtx);
+       sdata_unlock(sdata);
 }
 #endif
 
@@ -3765,8 +3690,6 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
        ifmgd->uapsd_max_sp_len = sdata->local->hw.uapsd_max_sp_len;
        ifmgd->p2p_noa_index = -1;
 
-       mutex_init(&ifmgd->mtx);
-
        if (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS)
                ifmgd->req_smps = IEEE80211_SMPS_AUTOMATIC;
        else
@@ -3923,6 +3846,12 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
         */
        ret = ieee80211_vif_use_channel(sdata, &chandef,
                                        IEEE80211_CHANCTX_SHARED);
+
+       /* don't downgrade for 5 and 10 MHz channels, though. */
+       if (chandef.width == NL80211_CHAN_WIDTH_5 ||
+           chandef.width == NL80211_CHAN_WIDTH_10)
+               return ret;
+
        while (ret && chandef.width != NL80211_CHAN_WIDTH_20_NOHT) {
                ifmgd->flags |= chandef_downgrade(&chandef);
                ret = ieee80211_vif_use_channel(sdata, &chandef,
@@ -4122,8 +4051,6 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
 
        /* try to authenticate/probe */
 
-       mutex_lock(&ifmgd->mtx);
-
        if ((ifmgd->auth_data && !ifmgd->auth_data->done) ||
            ifmgd->assoc_data) {
                err = -EBUSY;
@@ -4143,8 +4070,8 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
                                       WLAN_REASON_UNSPECIFIED,
                                       false, frame_buf);
 
-               __cfg80211_send_deauth(sdata->dev, frame_buf,
-                                      sizeof(frame_buf));
+               cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
+                                     sizeof(frame_buf));
        }
 
        sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid);
@@ -4161,8 +4088,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
 
        /* hold our own reference */
        cfg80211_ref_bss(local->hw.wiphy, auth_data->bss);
-       err = 0;
-       goto out_unlock;
+       return 0;
 
  err_clear:
        memset(ifmgd->bssid, 0, ETH_ALEN);
@@ -4170,9 +4096,6 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
        ifmgd->auth_data = NULL;
  err_free:
        kfree(auth_data);
- out_unlock:
-       mutex_unlock(&ifmgd->mtx);
-
        return err;
 }
 
@@ -4203,8 +4126,6 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
        assoc_data->ssid_len = ssidie[1];
        rcu_read_unlock();
 
-       mutex_lock(&ifmgd->mtx);
-
        if (ifmgd->associated) {
                u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
 
@@ -4212,8 +4133,8 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
                                       WLAN_REASON_UNSPECIFIED,
                                       false, frame_buf);
 
-               __cfg80211_send_deauth(sdata->dev, frame_buf,
-                                      sizeof(frame_buf));
+               cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
+                                     sizeof(frame_buf));
        }
 
        if (ifmgd->auth_data && !ifmgd->auth_data->done) {
@@ -4360,6 +4281,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
 
        ifmgd->assoc_data = assoc_data;
        ifmgd->dtim_period = 0;
+       ifmgd->have_beacon = false;
 
        err = ieee80211_prep_connection(sdata, req->bss, true);
        if (err)
@@ -4391,7 +4313,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
                        ifmgd->dtim_period = tim->dtim_period;
                        dtim_count = tim->dtim_count;
                }
-               assoc_data->have_beacon = true;
+               ifmgd->have_beacon = true;
                assoc_data->timeout = jiffies;
                assoc_data->timeout_started = true;
 
@@ -4407,7 +4329,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
        }
        rcu_read_unlock();
 
-       run_again(ifmgd, assoc_data->timeout);
+       run_again(sdata, assoc_data->timeout);
 
        if (bss->corrupt_data) {
                char *corrupt_type = "data";
@@ -4423,17 +4345,13 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
                           corrupt_type);
        }
 
-       err = 0;
-       goto out;
+       return 0;
  err_clear:
        memset(ifmgd->bssid, 0, ETH_ALEN);
        ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
        ifmgd->assoc_data = NULL;
  err_free:
        kfree(assoc_data);
- out:
-       mutex_unlock(&ifmgd->mtx);
-
        return err;
 }
 
@@ -4445,8 +4363,6 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
        bool tx = !req->local_state_change;
        bool report_frame = false;
 
-       mutex_lock(&ifmgd->mtx);
-
        sdata_info(sdata,
                   "deauthenticating from %pM by local choice (reason=%d)\n",
                   req->bssid, req->reason_code);
@@ -4458,7 +4374,6 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
                                               req->reason_code, tx,
                                               frame_buf);
                ieee80211_destroy_auth_data(sdata, false);
-               mutex_unlock(&ifmgd->mtx);
 
                report_frame = true;
                goto out;
@@ -4470,12 +4385,11 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
                                       req->reason_code, tx, frame_buf);
                report_frame = true;
        }
-       mutex_unlock(&ifmgd->mtx);
 
  out:
        if (report_frame)
-               __cfg80211_send_deauth(sdata->dev, frame_buf,
-                                      IEEE80211_DEAUTH_FRAME_LEN);
+               cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
+                                     IEEE80211_DEAUTH_FRAME_LEN);
 
        return 0;
 }
@@ -4487,18 +4401,14 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
        u8 bssid[ETH_ALEN];
        u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
 
-       mutex_lock(&ifmgd->mtx);
-
        /*
         * cfg80211 should catch this ... but it's racy since
         * we can receive a disassoc frame, process it, hand it
         * to cfg80211 while that's in a locked section already
         * trying to tell us that the user wants to disconnect.
         */
-       if (ifmgd->associated != req->bss) {
-               mutex_unlock(&ifmgd->mtx);
+       if (ifmgd->associated != req->bss)
                return -ENOLINK;
-       }
 
        sdata_info(sdata,
                   "disassociating from %pM by local choice (reason=%d)\n",
@@ -4508,10 +4418,9 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
        ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DISASSOC,
                               req->reason_code, !req->local_state_change,
                               frame_buf);
-       mutex_unlock(&ifmgd->mtx);
 
-       __cfg80211_send_disassoc(sdata->dev, frame_buf,
-                                IEEE80211_DEAUTH_FRAME_LEN);
+       cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
+                             IEEE80211_DEAUTH_FRAME_LEN);
 
        return 0;
 }
@@ -4531,13 +4440,16 @@ void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata)
        cancel_work_sync(&ifmgd->csa_connection_drop_work);
        cancel_work_sync(&ifmgd->chswitch_work);
 
-       mutex_lock(&ifmgd->mtx);
-       if (ifmgd->assoc_data)
+       sdata_lock(sdata);
+       if (ifmgd->assoc_data) {
+               struct cfg80211_bss *bss = ifmgd->assoc_data->bss;
                ieee80211_destroy_assoc_data(sdata, false);
+               cfg80211_assoc_timeout(sdata->dev, bss);
+       }
        if (ifmgd->auth_data)
                ieee80211_destroy_auth_data(sdata, false);
        del_timer_sync(&ifmgd->timer);
-       mutex_unlock(&ifmgd->mtx);
+       sdata_unlock(sdata);
 }
 
 void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif,
index a02bef35b134e28e6d5ad8d6da38c505c309e756..30d58d2d13e26a667024a3a2fca655b6fce1bd34 100644 (file)
@@ -397,8 +397,14 @@ static void rate_idx_match_mask(struct ieee80211_tx_rate *rate,
                        return;
 
                /* if HT BSS, and we handle a data frame, also try HT rates */
-               if (chan_width == NL80211_CHAN_WIDTH_20_NOHT)
+               switch (chan_width) {
+               case NL80211_CHAN_WIDTH_20_NOHT:
+               case NL80211_CHAN_WIDTH_5:
+               case NL80211_CHAN_WIDTH_10:
                        return;
+               default:
+                       break;
+               }
 
                alt_rate.idx = 0;
                /* keep protection flags */
index 8e29526202568f0223401659f4301500d5f9d424..23dbcfc69b3b7f09ffc35c81d03dddc38d36da5a 100644 (file)
@@ -258,6 +258,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
        pos += 2;
 
        if (status->flag & RX_FLAG_HT) {
+               unsigned int stbc;
+
                rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
                *pos++ = local->hw.radiotap_mcs_details;
                *pos = 0;
@@ -267,6 +269,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
                        *pos |= IEEE80211_RADIOTAP_MCS_BW_40;
                if (status->flag & RX_FLAG_HT_GF)
                        *pos |= IEEE80211_RADIOTAP_MCS_FMT_GF;
+               stbc = (status->flag & RX_FLAG_STBC_MASK) >> RX_FLAG_STBC_SHIFT;
+               *pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT;
                pos++;
                *pos++ = status->rate_idx;
        }
@@ -1372,6 +1376,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
        struct sk_buff *skb = rx->skb;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       int i;
 
        if (!sta)
                return RX_CONTINUE;
@@ -1422,6 +1427,19 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
                ewma_add(&sta->avg_signal, -status->signal);
        }
 
+       if (status->chains) {
+               sta->chains = status->chains;
+               for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
+                       int signal = status->chain_signal[i];
+
+                       if (!(status->chains & BIT(i)))
+                               continue;
+
+                       sta->chain_signal_last[i] = signal;
+                       ewma_add(&sta->chain_signal_avg[i], -signal);
+               }
+       }
+
        /*
         * Change STA power saving mode only at the end of a frame
         * exchange sequence.
@@ -1608,7 +1626,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
                        entry->ccmp = 1;
                        memcpy(entry->last_pn,
                               rx->key->u.ccmp.rx_pn[queue],
-                              CCMP_PN_LEN);
+                              IEEE80211_CCMP_PN_LEN);
                }
                return RX_QUEUED;
        }
@@ -1627,21 +1645,21 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
         * (IEEE 802.11i, 8.3.3.4.5) */
        if (entry->ccmp) {
                int i;
-               u8 pn[CCMP_PN_LEN], *rpn;
+               u8 pn[IEEE80211_CCMP_PN_LEN], *rpn;
                int queue;
                if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP)
                        return RX_DROP_UNUSABLE;
-               memcpy(pn, entry->last_pn, CCMP_PN_LEN);
-               for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
+               memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN);
+               for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) {
                        pn[i]++;
                        if (pn[i])
                                break;
                }
                queue = rx->security_idx;
                rpn = rx->key->u.ccmp.rx_pn[queue];
-               if (memcmp(pn, rpn, CCMP_PN_LEN))
+               if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN))
                        return RX_DROP_UNUSABLE;
-               memcpy(entry->last_pn, pn, CCMP_PN_LEN);
+               memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN);
        }
 
        skb_pull(rx->skb, ieee80211_hdrlen(fc));
@@ -1729,27 +1747,21 @@ static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
                if (unlikely(!ieee80211_has_protected(fc) &&
                             ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
                             rx->key)) {
-                       if (ieee80211_is_deauth(fc))
-                               cfg80211_send_unprot_deauth(rx->sdata->dev,
-                                                           rx->skb->data,
-                                                           rx->skb->len);
-                       else if (ieee80211_is_disassoc(fc))
-                               cfg80211_send_unprot_disassoc(rx->sdata->dev,
-                                                             rx->skb->data,
-                                                             rx->skb->len);
+                       if (ieee80211_is_deauth(fc) ||
+                           ieee80211_is_disassoc(fc))
+                               cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
+                                                            rx->skb->data,
+                                                            rx->skb->len);
                        return -EACCES;
                }
                /* BIP does not use Protected field, so need to check MMIE */
                if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
                             ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
-                       if (ieee80211_is_deauth(fc))
-                               cfg80211_send_unprot_deauth(rx->sdata->dev,
-                                                           rx->skb->data,
-                                                           rx->skb->len);
-                       else if (ieee80211_is_disassoc(fc))
-                               cfg80211_send_unprot_disassoc(rx->sdata->dev,
-                                                             rx->skb->data,
-                                                             rx->skb->len);
+                       if (ieee80211_is_deauth(fc) ||
+                           ieee80211_is_disassoc(fc))
+                               cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
+                                                            rx->skb->data,
+                                                            rx->skb->len);
                        return -EACCES;
                }
                /*
index 99b103921a4befecdaa8d95a1391d3f040479fb1..1b122a79b0d8369a4118a6f238dc6d69db9e072c 100644 (file)
@@ -140,6 +140,15 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
                        bss->valid_data |= IEEE80211_BSS_VALID_WMM;
        }
 
+       if (beacon) {
+               struct ieee80211_supported_band *sband =
+                       local->hw.wiphy->bands[rx_status->band];
+               if (!(rx_status->flag & RX_FLAG_HT) &&
+                   !(rx_status->flag & RX_FLAG_VHT))
+                       bss->beacon_rate =
+                               &sband->bitrates[rx_status->rate_idx];
+       }
+
        return bss;
 }
 
index 11216bc13b27565a4734638014ad01f5d2d88911..aeb967a0aeed5ccc49b6c45c5ac13d0d97f97130 100644 (file)
@@ -149,6 +149,7 @@ static void cleanup_single_sta(struct sta_info *sta)
         * directly by station destruction.
         */
        for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
+               kfree(sta->ampdu_mlme.tid_start_tx[i]);
                tid_tx = rcu_dereference_raw(sta->ampdu_mlme.tid_tx[i]);
                if (!tid_tx)
                        continue;
@@ -346,6 +347,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
        if (ieee80211_vif_is_mesh(&sdata->vif) &&
            !sdata->u.mesh.user_mpm)
                init_timer(&sta->plink_timer);
+       sta->nonpeer_pm = NL80211_MESH_POWER_ACTIVE;
 #endif
 
        memcpy(sta->sta.addr, addr, ETH_ALEN);
@@ -358,6 +360,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
        do_posix_clock_monotonic_gettime(&uptime);
        sta->last_connected = uptime.tv_sec;
        ewma_init(&sta->avg_signal, 1024, 8);
+       for (i = 0; i < ARRAY_SIZE(sta->chain_signal_avg); i++)
+               ewma_init(&sta->chain_signal_avg[i], 1024, 8);
 
        if (sta_prepare_rate_control(local, sta, gfp)) {
                kfree(sta);
@@ -1130,6 +1134,7 @@ static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata,
         * ends the poll/service period.
         */
        info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER |
+                      IEEE80211_TX_CTL_PS_RESPONSE |
                       IEEE80211_TX_STATUS_EOSP |
                       IEEE80211_TX_CTL_REQ_TX_STATUS;
 
@@ -1267,7 +1272,8 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta,
                         * STA may still remain is PS mode after this frame
                         * exchange.
                         */
-                       info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
+                       info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER |
+                                      IEEE80211_TX_CTL_PS_RESPONSE;
 
                        /*
                         * Use MoreData flag to indicate whether there are
index adc30045f99ec9f029c0bd8cfc0a4d7c75b10403..4208dbd5861f4d87cc8fa94aefc7c00f51e72eed 100644 (file)
@@ -203,6 +203,7 @@ struct tid_ampdu_rx {
  *     driver requested to close until the work for it runs
  * @mtx: mutex to protect all TX data (except non-NULL assignments
  *     to tid_tx[idx], which are protected by the sta spinlock)
+ *     tid_start_tx is also protected by sta->lock.
  */
 struct sta_ampdu_mlme {
        struct mutex mtx;
@@ -297,6 +298,9 @@ struct sta_ampdu_mlme {
  * @rcu_head: RCU head used for freeing this station struct
  * @cur_max_bandwidth: maximum bandwidth to use for TX to the station,
  *     taken from HT/VHT capabilities or VHT operating mode notification
+ * @chains: chains ever used for RX from this station
+ * @chain_signal_last: last signal (per chain)
+ * @chain_signal_avg: signal average (per chain)
  */
 struct sta_info {
        /* General information, mostly static */
@@ -344,6 +348,11 @@ struct sta_info {
        int last_signal;
        struct ewma avg_signal;
        int last_ack_signal;
+
+       u8 chains;
+       s8 chain_signal_last[IEEE80211_MAX_CHAINS];
+       struct ewma chain_signal_avg[IEEE80211_MAX_CHAINS];
+
        /* Plus 1 for non-QoS frames */
        __le16 last_seq_ctrl[IEEE80211_NUM_TIDS + 1];
 
index 9972e07a2f9650315521560cfb48267504476200..4105d0ca963e25dfdd558e30c48274277e5ad2df 100644 (file)
@@ -398,13 +398,14 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
        if (ieee80211_has_order(hdr->frame_control))
                return TX_CONTINUE;
 
+       if (tx->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
+               info->hw_queue = tx->sdata->vif.cab_queue;
+
        /* no stations in PS mode */
        if (!atomic_read(&ps->num_sta_ps))
                return TX_CONTINUE;
 
        info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;
-       if (tx->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
-               info->hw_queue = tx->sdata->vif.cab_queue;
 
        /* device releases frame after DTIM beacon */
        if (!(tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING))
@@ -1789,12 +1790,6 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
                break;
 #ifdef CONFIG_MAC80211_MESH
        case NL80211_IFTYPE_MESH_POINT:
-               if (!sdata->u.mesh.mshcfg.dot11MeshTTL) {
-                       /* Do not send frames with mesh_ttl == 0 */
-                       sdata->u.mesh.mshstats.dropped_frames_ttl++;
-                       goto fail_rcu;
-               }
-
                if (!is_multicast_ether_addr(skb->data)) {
                        struct sta_info *next_hop;
                        bool mpp_lookup = true;
index 72e6292955bb9eb3b896088d3dc756771e1ed3b4..22654452a56157b44835f97efacdf2e8960ee5bc 100644 (file)
@@ -560,6 +560,9 @@ void ieee80211_iterate_active_interfaces(
        list_for_each_entry(sdata, &local->interfaces, list) {
                switch (sdata->vif.type) {
                case NL80211_IFTYPE_MONITOR:
+                       if (!(sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE))
+                               continue;
+                       break;
                case NL80211_IFTYPE_AP_VLAN:
                        continue;
                default:
@@ -598,6 +601,9 @@ void ieee80211_iterate_active_interfaces_atomic(
        list_for_each_entry_rcu(sdata, &local->interfaces, list) {
                switch (sdata->vif.type) {
                case NL80211_IFTYPE_MONITOR:
+                       if (!(sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE))
+                               continue;
+                       break;
                case NL80211_IFTYPE_AP_VLAN:
                        continue;
                default:
@@ -1072,32 +1078,6 @@ void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
        ieee80211_set_wmm_default(sdata, true);
 }
 
-u32 ieee80211_mandatory_rates(struct ieee80211_local *local,
-                             enum ieee80211_band band)
-{
-       struct ieee80211_supported_band *sband;
-       struct ieee80211_rate *bitrates;
-       u32 mandatory_rates;
-       enum ieee80211_rate_flags mandatory_flag;
-       int i;
-
-       sband = local->hw.wiphy->bands[band];
-       if (WARN_ON(!sband))
-               return 1;
-
-       if (band == IEEE80211_BAND_2GHZ)
-               mandatory_flag = IEEE80211_RATE_MANDATORY_B;
-       else
-               mandatory_flag = IEEE80211_RATE_MANDATORY_A;
-
-       bitrates = sband->bitrates;
-       mandatory_rates = 0;
-       for (i = 0; i < sband->n_bitrates; i++)
-               if (bitrates[i].flags & mandatory_flag)
-                       mandatory_rates |= BIT(i);
-       return mandatory_rates;
-}
-
 void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
                         u16 transaction, u16 auth_alg, u16 status,
                         const u8 *extra, size_t extra_len, const u8 *da,
@@ -1604,12 +1584,13 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                                   BSS_CHANGED_ARP_FILTER |
                                   BSS_CHANGED_PS;
 
-                       if (sdata->u.mgd.dtim_period)
-                               changed |= BSS_CHANGED_DTIM_PERIOD;
+                       /* Re-send beacon info report to the driver */
+                       if (sdata->u.mgd.have_beacon)
+                               changed |= BSS_CHANGED_BEACON_INFO;
 
-                       mutex_lock(&sdata->u.mgd.mtx);
+                       sdata_lock(sdata);
                        ieee80211_bss_info_change_notify(sdata, changed);
-                       mutex_unlock(&sdata->u.mgd.mtx);
+                       sdata_unlock(sdata);
                        break;
                case NL80211_IFTYPE_ADHOC:
                        changed |= BSS_CHANGED_IBSS;
index 171344d4eb7c69077edd1047d6c1d5176a74b351..97c289414e32a5d989528dd9bba7c8b77ec982bf 100644 (file)
@@ -396,7 +396,7 @@ void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
        new_bw = ieee80211_sta_cur_vht_bw(sta);
        if (new_bw != sta->sta.bandwidth) {
                sta->sta.bandwidth = new_bw;
-               changed |= IEEE80211_RC_NSS_CHANGED;
+               changed |= IEEE80211_RC_BW_CHANGED;
        }
 
  change:
index c04d401dae9243517007a193acd3e7b1fb0478b0..6ee2b586357275aa33c13454b37d15b970cf1181 100644 (file)
@@ -28,7 +28,7 @@
 int ieee80211_wep_init(struct ieee80211_local *local)
 {
        /* start WEP IV from a random value */
-       get_random_bytes(&local->wep_iv, WEP_IV_LEN);
+       get_random_bytes(&local->wep_iv, IEEE80211_WEP_IV_LEN);
 
        local->wep_tx_tfm = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(local->wep_tx_tfm)) {
@@ -98,20 +98,21 @@ static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local,
 
        hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
 
-       if (WARN_ON(skb_tailroom(skb) < WEP_ICV_LEN ||
-                   skb_headroom(skb) < WEP_IV_LEN))
+       if (WARN_ON(skb_tailroom(skb) < IEEE80211_WEP_ICV_LEN ||
+                   skb_headroom(skb) < IEEE80211_WEP_IV_LEN))
                return NULL;
 
        hdrlen = ieee80211_hdrlen(hdr->frame_control);
-       newhdr = skb_push(skb, WEP_IV_LEN);
-       memmove(newhdr, newhdr + WEP_IV_LEN, hdrlen);
+       newhdr = skb_push(skb, IEEE80211_WEP_IV_LEN);
+       memmove(newhdr, newhdr + IEEE80211_WEP_IV_LEN, hdrlen);
 
        /* the HW only needs room for the IV, but not the actual IV */
        if (info->control.hw_key &&
            (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))
                return newhdr + hdrlen;
 
-       skb_set_network_header(skb, skb_network_offset(skb) + WEP_IV_LEN);
+       skb_set_network_header(skb, skb_network_offset(skb) +
+                                   IEEE80211_WEP_IV_LEN);
        ieee80211_wep_get_iv(local, keylen, keyidx, newhdr + hdrlen);
        return newhdr + hdrlen;
 }
@@ -125,8 +126,8 @@ static void ieee80211_wep_remove_iv(struct ieee80211_local *local,
        unsigned int hdrlen;
 
        hdrlen = ieee80211_hdrlen(hdr->frame_control);
-       memmove(skb->data + WEP_IV_LEN, skb->data, hdrlen);
-       skb_pull(skb, WEP_IV_LEN);
+       memmove(skb->data + IEEE80211_WEP_IV_LEN, skb->data, hdrlen);
+       skb_pull(skb, IEEE80211_WEP_IV_LEN);
 }
 
 
@@ -146,7 +147,7 @@ int ieee80211_wep_encrypt_data(struct crypto_cipher *tfm, u8 *rc4key,
        put_unaligned(icv, (__le32 *)(data + data_len));
 
        crypto_cipher_setkey(tfm, rc4key, klen);
-       for (i = 0; i < data_len + WEP_ICV_LEN; i++)
+       for (i = 0; i < data_len + IEEE80211_WEP_ICV_LEN; i++)
                crypto_cipher_encrypt_one(tfm, data + i, data + i);
 
        return 0;
@@ -172,7 +173,7 @@ int ieee80211_wep_encrypt(struct ieee80211_local *local,
        if (!iv)
                return -1;
 
-       len = skb->len - (iv + WEP_IV_LEN - skb->data);
+       len = skb->len - (iv + IEEE80211_WEP_IV_LEN - skb->data);
 
        /* Prepend 24-bit IV to RC4 key */
        memcpy(rc4key, iv, 3);
@@ -181,10 +182,10 @@ int ieee80211_wep_encrypt(struct ieee80211_local *local,
        memcpy(rc4key + 3, key, keylen);
 
        /* Add room for ICV */
-       skb_put(skb, WEP_ICV_LEN);
+       skb_put(skb, IEEE80211_WEP_ICV_LEN);
 
        return ieee80211_wep_encrypt_data(local->wep_tx_tfm, rc4key, keylen + 3,
-                                         iv + WEP_IV_LEN, len);
+                                         iv + IEEE80211_WEP_IV_LEN, len);
 }
 
 
@@ -201,11 +202,11 @@ int ieee80211_wep_decrypt_data(struct crypto_cipher *tfm, u8 *rc4key,
                return -1;
 
        crypto_cipher_setkey(tfm, rc4key, klen);
-       for (i = 0; i < data_len + WEP_ICV_LEN; i++)
+       for (i = 0; i < data_len + IEEE80211_WEP_ICV_LEN; i++)
                crypto_cipher_decrypt_one(tfm, data + i, data + i);
 
        crc = cpu_to_le32(~crc32_le(~0, data, data_len));
-       if (memcmp(&crc, data + data_len, WEP_ICV_LEN) != 0)
+       if (memcmp(&crc, data + data_len, IEEE80211_WEP_ICV_LEN) != 0)
                /* ICV mismatch */
                return -1;
 
@@ -237,10 +238,10 @@ static int ieee80211_wep_decrypt(struct ieee80211_local *local,
                return -1;
 
        hdrlen = ieee80211_hdrlen(hdr->frame_control);
-       if (skb->len < hdrlen + WEP_IV_LEN + WEP_ICV_LEN)
+       if (skb->len < hdrlen + IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN)
                return -1;
 
-       len = skb->len - hdrlen - WEP_IV_LEN - WEP_ICV_LEN;
+       len = skb->len - hdrlen - IEEE80211_WEP_IV_LEN - IEEE80211_WEP_ICV_LEN;
 
        keyidx = skb->data[hdrlen + 3] >> 6;
 
@@ -256,16 +257,16 @@ static int ieee80211_wep_decrypt(struct ieee80211_local *local,
        memcpy(rc4key + 3, key->conf.key, key->conf.keylen);
 
        if (ieee80211_wep_decrypt_data(local->wep_rx_tfm, rc4key, klen,
-                                      skb->data + hdrlen + WEP_IV_LEN,
-                                      len))
+                                      skb->data + hdrlen +
+                                      IEEE80211_WEP_IV_LEN, len))
                ret = -1;
 
        /* Trim ICV */
-       skb_trim(skb, skb->len - WEP_ICV_LEN);
+       skb_trim(skb, skb->len - IEEE80211_WEP_ICV_LEN);
 
        /* Remove IV */
-       memmove(skb->data + WEP_IV_LEN, skb->data, hdrlen);
-       skb_pull(skb, WEP_IV_LEN);
+       memmove(skb->data + IEEE80211_WEP_IV_LEN, skb->data, hdrlen);
+       skb_pull(skb, IEEE80211_WEP_IV_LEN);
 
        return ret;
 }
@@ -305,13 +306,14 @@ ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx)
                if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key))
                        return RX_DROP_UNUSABLE;
        } else if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
-               if (!pskb_may_pull(rx->skb, ieee80211_hdrlen(fc) + WEP_IV_LEN))
+               if (!pskb_may_pull(rx->skb, ieee80211_hdrlen(fc) +
+                                           IEEE80211_WEP_IV_LEN))
                        return RX_DROP_UNUSABLE;
                if (rx->sta && ieee80211_wep_is_weak_iv(rx->skb, rx->key))
                        rx->sta->wep_weak_iv_count++;
                ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key);
                /* remove ICV */
-               if (pskb_trim(rx->skb, rx->skb->len - WEP_ICV_LEN))
+               if (pskb_trim(rx->skb, rx->skb->len - IEEE80211_WEP_ICV_LEN))
                        return RX_DROP_UNUSABLE;
        }
 
index c7c6d644486fc48354c332483db73b2454d781a5..c9edfcb7a13b65ad6d766b182e54cf6cfcc83c0b 100644 (file)
@@ -62,10 +62,10 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
 
        tail = MICHAEL_MIC_LEN;
        if (!info->control.hw_key)
-               tail += TKIP_ICV_LEN;
+               tail += IEEE80211_TKIP_ICV_LEN;
 
        if (WARN_ON(skb_tailroom(skb) < tail ||
-                   skb_headroom(skb) < TKIP_IV_LEN))
+                   skb_headroom(skb) < IEEE80211_TKIP_IV_LEN))
                return TX_DROP;
 
        key = &tx->key->conf.key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY];
@@ -198,15 +198,16 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
        if (info->control.hw_key)
                tail = 0;
        else
-               tail = TKIP_ICV_LEN;
+               tail = IEEE80211_TKIP_ICV_LEN;
 
        if (WARN_ON(skb_tailroom(skb) < tail ||
-                   skb_headroom(skb) < TKIP_IV_LEN))
+                   skb_headroom(skb) < IEEE80211_TKIP_IV_LEN))
                return -1;
 
-       pos = skb_push(skb, TKIP_IV_LEN);
-       memmove(pos, pos + TKIP_IV_LEN, hdrlen);
-       skb_set_network_header(skb, skb_network_offset(skb) + TKIP_IV_LEN);
+       pos = skb_push(skb, IEEE80211_TKIP_IV_LEN);
+       memmove(pos, pos + IEEE80211_TKIP_IV_LEN, hdrlen);
+       skb_set_network_header(skb, skb_network_offset(skb) +
+                                   IEEE80211_TKIP_IV_LEN);
        pos += hdrlen;
 
        /* the HW only needs room for the IV, but not the actual IV */
@@ -227,7 +228,7 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
                return 0;
 
        /* Add room for ICV */
-       skb_put(skb, TKIP_ICV_LEN);
+       skb_put(skb, IEEE80211_TKIP_ICV_LEN);
 
        return ieee80211_tkip_encrypt_data(tx->local->wep_tx_tfm,
                                           key, skb, pos, len);
@@ -290,11 +291,11 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
                return RX_DROP_UNUSABLE;
 
        /* Trim ICV */
-       skb_trim(skb, skb->len - TKIP_ICV_LEN);
+       skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN);
 
        /* Remove IV */
-       memmove(skb->data + TKIP_IV_LEN, skb->data, hdrlen);
-       skb_pull(skb, TKIP_IV_LEN);
+       memmove(skb->data + IEEE80211_TKIP_IV_LEN, skb->data, hdrlen);
+       skb_pull(skb, IEEE80211_TKIP_IV_LEN);
 
        return RX_CONTINUE;
 }
@@ -337,9 +338,9 @@ static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *scratch,
        else
                qos_tid = 0;
 
-       data_len = skb->len - hdrlen - CCMP_HDR_LEN;
+       data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN;
        if (encrypted)
-               data_len -= CCMP_MIC_LEN;
+               data_len -= IEEE80211_CCMP_MIC_LEN;
 
        /* First block, b_0 */
        b_0[0] = 0x59; /* flags: Adata: 1, M: 011, L: 001 */
@@ -348,7 +349,7 @@ static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *scratch,
         */
        b_0[1] = qos_tid | (mgmt << 4);
        memcpy(&b_0[2], hdr->addr2, ETH_ALEN);
-       memcpy(&b_0[8], pn, CCMP_PN_LEN);
+       memcpy(&b_0[8], pn, IEEE80211_CCMP_PN_LEN);
        /* l(m) */
        put_unaligned_be16(data_len, &b_0[14]);
 
@@ -424,15 +425,16 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
        if (info->control.hw_key)
                tail = 0;
        else
-               tail = CCMP_MIC_LEN;
+               tail = IEEE80211_CCMP_MIC_LEN;
 
        if (WARN_ON(skb_tailroom(skb) < tail ||
-                   skb_headroom(skb) < CCMP_HDR_LEN))
+                   skb_headroom(skb) < IEEE80211_CCMP_HDR_LEN))
                return -1;
 
-       pos = skb_push(skb, CCMP_HDR_LEN);
-       memmove(pos, pos + CCMP_HDR_LEN, hdrlen);
-       skb_set_network_header(skb, skb_network_offset(skb) + CCMP_HDR_LEN);
+       pos = skb_push(skb, IEEE80211_CCMP_HDR_LEN);
+       memmove(pos, pos + IEEE80211_CCMP_HDR_LEN, hdrlen);
+       skb_set_network_header(skb, skb_network_offset(skb) +
+                                   IEEE80211_CCMP_HDR_LEN);
 
        /* the HW only needs room for the IV, but not the actual IV */
        if (info->control.hw_key &&
@@ -457,10 +459,10 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
        if (info->control.hw_key)
                return 0;
 
-       pos += CCMP_HDR_LEN;
+       pos += IEEE80211_CCMP_HDR_LEN;
        ccmp_special_blocks(skb, pn, scratch, 0);
        ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, scratch, pos, len,
-                                 pos, skb_put(skb, CCMP_MIC_LEN));
+                                 pos, skb_put(skb, IEEE80211_CCMP_MIC_LEN));
 
        return 0;
 }
@@ -490,7 +492,7 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
        struct ieee80211_key *key = rx->key;
        struct sk_buff *skb = rx->skb;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
-       u8 pn[CCMP_PN_LEN];
+       u8 pn[IEEE80211_CCMP_PN_LEN];
        int data_len;
        int queue;
 
@@ -500,12 +502,13 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
            !ieee80211_is_robust_mgmt_frame(hdr))
                return RX_CONTINUE;
 
-       data_len = skb->len - hdrlen - CCMP_HDR_LEN - CCMP_MIC_LEN;
+       data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN -
+                  IEEE80211_CCMP_MIC_LEN;
        if (!rx->sta || data_len < 0)
                return RX_DROP_UNUSABLE;
 
        if (status->flag & RX_FLAG_DECRYPTED) {
-               if (!pskb_may_pull(rx->skb, hdrlen + CCMP_HDR_LEN))
+               if (!pskb_may_pull(rx->skb, hdrlen + IEEE80211_CCMP_HDR_LEN))
                        return RX_DROP_UNUSABLE;
        } else {
                if (skb_linearize(rx->skb))
@@ -516,7 +519,7 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
 
        queue = rx->security_idx;
 
-       if (memcmp(pn, key->u.ccmp.rx_pn[queue], CCMP_PN_LEN) <= 0) {
+       if (memcmp(pn, key->u.ccmp.rx_pn[queue], IEEE80211_CCMP_PN_LEN) <= 0) {
                key->u.ccmp.replays++;
                return RX_DROP_UNUSABLE;
        }
@@ -528,19 +531,20 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
 
                if (ieee80211_aes_ccm_decrypt(
                            key->u.ccmp.tfm, scratch,
-                           skb->data + hdrlen + CCMP_HDR_LEN, data_len,
-                           skb->data + skb->len - CCMP_MIC_LEN,
-                           skb->data + hdrlen + CCMP_HDR_LEN))
+                           skb->data + hdrlen + IEEE80211_CCMP_HDR_LEN,
+                           data_len,
+                           skb->data + skb->len - IEEE80211_CCMP_MIC_LEN,
+                           skb->data + hdrlen + IEEE80211_CCMP_HDR_LEN))
                        return RX_DROP_UNUSABLE;
        }
 
-       memcpy(key->u.ccmp.rx_pn[queue], pn, CCMP_PN_LEN);
+       memcpy(key->u.ccmp.rx_pn[queue], pn, IEEE80211_CCMP_PN_LEN);
 
        /* Remove CCMP header and MIC */
-       if (pskb_trim(skb, skb->len - CCMP_MIC_LEN))
+       if (pskb_trim(skb, skb->len - IEEE80211_CCMP_MIC_LEN))
                return RX_DROP_UNUSABLE;
-       memmove(skb->data + CCMP_HDR_LEN, skb->data, hdrlen);
-       skb_pull(skb, CCMP_HDR_LEN);
+       memmove(skb->data + IEEE80211_CCMP_HDR_LEN, skb->data, hdrlen);
+       skb_pull(skb, IEEE80211_CCMP_HDR_LEN);
 
        return RX_CONTINUE;
 }
index 40d2527693da8170b4edd9d0d50837255387f1eb..dc96a83aa6abb6659311e11ef13f97dc81c3b494 100644 (file)
@@ -44,6 +44,47 @@ DEFINE_MUTEX(nfc_devlist_mutex);
 /* NFC device ID bitmap */
 static DEFINE_IDA(nfc_index_ida);
 
+int nfc_fw_upload(struct nfc_dev *dev, const char *firmware_name)
+{
+       int rc = 0;
+
+       pr_debug("%s do firmware %s\n", dev_name(&dev->dev), firmware_name);
+
+       device_lock(&dev->dev);
+
+       if (!device_is_registered(&dev->dev)) {
+               rc = -ENODEV;
+               goto error;
+       }
+
+       if (dev->dev_up) {
+               rc = -EBUSY;
+               goto error;
+       }
+
+       if (!dev->ops->fw_upload) {
+               rc = -EOPNOTSUPP;
+               goto error;
+       }
+
+       dev->fw_upload_in_progress = true;
+       rc = dev->ops->fw_upload(dev, firmware_name);
+       if (rc)
+               dev->fw_upload_in_progress = false;
+
+error:
+       device_unlock(&dev->dev);
+       return rc;
+}
+
+int nfc_fw_upload_done(struct nfc_dev *dev, const char *firmware_name)
+{
+       dev->fw_upload_in_progress = false;
+
+       return nfc_genl_fw_upload_done(dev, firmware_name);
+}
+EXPORT_SYMBOL(nfc_fw_upload_done);
+
 /**
  * nfc_dev_up - turn on the NFC device
  *
@@ -69,6 +110,11 @@ int nfc_dev_up(struct nfc_dev *dev)
                goto error;
        }
 
+       if (dev->fw_upload_in_progress) {
+               rc = -EBUSY;
+               goto error;
+       }
+
        if (dev->dev_up) {
                rc = -EALREADY;
                goto error;
@@ -80,6 +126,13 @@ int nfc_dev_up(struct nfc_dev *dev)
        if (!rc)
                dev->dev_up = true;
 
+       /* We have to enable the device before discovering SEs */
+       if (dev->ops->discover_se) {
+               rc = dev->ops->discover_se(dev);
+               if (rc)
+                       pr_warn("SE discovery failed\n");
+       }
+
 error:
        device_unlock(&dev->dev);
        return rc;
@@ -475,6 +528,108 @@ error:
        return rc;
 }
 
+static struct nfc_se *find_se(struct nfc_dev *dev, u32 se_idx)
+{
+       struct nfc_se *se, *n;
+
+       list_for_each_entry_safe(se, n, &dev->secure_elements, list)
+               if (se->idx == se_idx)
+                       return se;
+
+       return NULL;
+}
+
+int nfc_enable_se(struct nfc_dev *dev, u32 se_idx)
+{
+
+       struct nfc_se *se;
+       int rc;
+
+       pr_debug("%s se index %d\n", dev_name(&dev->dev), se_idx);
+
+       device_lock(&dev->dev);
+
+       if (!device_is_registered(&dev->dev)) {
+               rc = -ENODEV;
+               goto error;
+       }
+
+       if (!dev->dev_up) {
+               rc = -ENODEV;
+               goto error;
+       }
+
+       if (dev->polling) {
+               rc = -EBUSY;
+               goto error;
+       }
+
+       if (!dev->ops->enable_se || !dev->ops->disable_se) {
+               rc = -EOPNOTSUPP;
+               goto error;
+       }
+
+       se = find_se(dev, se_idx);
+       if (!se) {
+               rc = -EINVAL;
+               goto error;
+       }
+
+       if (se->state == NFC_SE_ENABLED) {
+               rc = -EALREADY;
+               goto error;
+       }
+
+       rc = dev->ops->enable_se(dev, se_idx);
+
+error:
+       device_unlock(&dev->dev);
+       return rc;
+}
+
+int nfc_disable_se(struct nfc_dev *dev, u32 se_idx)
+{
+
+       struct nfc_se *se;
+       int rc;
+
+       pr_debug("%s se index %d\n", dev_name(&dev->dev), se_idx);
+
+       device_lock(&dev->dev);
+
+       if (!device_is_registered(&dev->dev)) {
+               rc = -ENODEV;
+               goto error;
+       }
+
+       if (!dev->dev_up) {
+               rc = -ENODEV;
+               goto error;
+       }
+
+       if (!dev->ops->enable_se || !dev->ops->disable_se) {
+               rc = -EOPNOTSUPP;
+               goto error;
+       }
+
+       se = find_se(dev, se_idx);
+       if (!se) {
+               rc = -EINVAL;
+               goto error;
+       }
+
+       if (se->state == NFC_SE_DISABLED) {
+               rc = -EALREADY;
+               goto error;
+       }
+
+       rc = dev->ops->disable_se(dev, se_idx);
+
+error:
+       device_unlock(&dev->dev);
+       return rc;
+}
+
 int nfc_set_remote_general_bytes(struct nfc_dev *dev, u8 *gb, u8 gb_len)
 {
        pr_debug("dev_name=%s gb_len=%d\n", dev_name(&dev->dev), gb_len);
@@ -707,14 +862,79 @@ inline void nfc_driver_failure(struct nfc_dev *dev, int err)
 }
 EXPORT_SYMBOL(nfc_driver_failure);
 
+int nfc_add_se(struct nfc_dev *dev, u32 se_idx, u16 type)
+{
+       struct nfc_se *se;
+       int rc;
+
+       pr_debug("%s se index %d\n", dev_name(&dev->dev), se_idx);
+
+       se = find_se(dev, se_idx);
+       if (se)
+               return -EALREADY;
+
+       se = kzalloc(sizeof(struct nfc_se), GFP_KERNEL);
+       if (!se)
+               return -ENOMEM;
+
+       se->idx = se_idx;
+       se->type = type;
+       se->state = NFC_SE_DISABLED;
+       INIT_LIST_HEAD(&se->list);
+
+       list_add(&se->list, &dev->secure_elements);
+
+       rc = nfc_genl_se_added(dev, se_idx, type);
+       if (rc < 0) {
+               list_del(&se->list);
+               kfree(se);
+
+               return rc;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(nfc_add_se);
+
+int nfc_remove_se(struct nfc_dev *dev, u32 se_idx)
+{
+       struct nfc_se *se, *n;
+       int rc;
+
+       pr_debug("%s se index %d\n", dev_name(&dev->dev), se_idx);
+
+       list_for_each_entry_safe(se, n, &dev->secure_elements, list)
+               if (se->idx == se_idx) {
+                       rc = nfc_genl_se_removed(dev, se_idx);
+                       if (rc < 0)
+                               return rc;
+
+                       list_del(&se->list);
+                       kfree(se);
+
+                       return 0;
+               }
+
+       return -EINVAL;
+}
+EXPORT_SYMBOL(nfc_remove_se);
+
 static void nfc_release(struct device *d)
 {
        struct nfc_dev *dev = to_nfc_dev(d);
+       struct nfc_se *se, *n;
 
        pr_debug("dev_name=%s\n", dev_name(&dev->dev));
 
        nfc_genl_data_exit(&dev->genl_data);
        kfree(dev->targets);
+
+       list_for_each_entry_safe(se, n, &dev->secure_elements, list) {
+               nfc_genl_se_removed(dev, se->idx);
+               list_del(&se->list);
+               kfree(se);
+       }
+
        kfree(dev);
 }
 
@@ -786,7 +1006,6 @@ struct nfc_dev *nfc_get_device(unsigned int idx)
  */
 struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
                                    u32 supported_protocols,
-                                   u32 supported_se,
                                    int tx_headroom, int tx_tailroom)
 {
        struct nfc_dev *dev;
@@ -804,10 +1023,9 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
 
        dev->ops = ops;
        dev->supported_protocols = supported_protocols;
-       dev->supported_se = supported_se;
-       dev->active_se = NFC_SE_NONE;
        dev->tx_headroom = tx_headroom;
        dev->tx_tailroom = tx_tailroom;
+       INIT_LIST_HEAD(&dev->secure_elements);
 
        nfc_genl_data_init(&dev->genl_data);
 
index 91020b210d8774416467bf009130cad9db5870a1..7b1c186736ebf924977ed8eb9040357ab88f3c3c 100644 (file)
@@ -570,21 +570,21 @@ static int hci_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
 {
        struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
 
-       if (hdev->ops->dep_link_up)
-               return hdev->ops->dep_link_up(hdev, target, comm_mode,
-                                               gb, gb_len);
+       if (!hdev->ops->dep_link_up)
+               return 0;
 
-       return 0;
+       return hdev->ops->dep_link_up(hdev, target, comm_mode,
+                                     gb, gb_len);
 }
 
 static int hci_dep_link_down(struct nfc_dev *nfc_dev)
 {
        struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
 
-       if (hdev->ops->dep_link_down)
-               return hdev->ops->dep_link_down(hdev);
+       if (!hdev->ops->dep_link_down)
+               return 0;
 
-       return 0;
+       return hdev->ops->dep_link_down(hdev);
 }
 
 static int hci_activate_target(struct nfc_dev *nfc_dev,
@@ -673,12 +673,12 @@ static int hci_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb)
 {
        struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
 
-       if (hdev->ops->tm_send)
-               return hdev->ops->tm_send(hdev, skb);
-
-       kfree_skb(skb);
+       if (!hdev->ops->tm_send) {
+               kfree_skb(skb);
+               return -ENOTSUPP;
+       }
 
-       return -ENOTSUPP;
+       return hdev->ops->tm_send(hdev, skb);
 }
 
 static int hci_check_presence(struct nfc_dev *nfc_dev,
@@ -686,8 +686,38 @@ static int hci_check_presence(struct nfc_dev *nfc_dev,
 {
        struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
 
-       if (hdev->ops->check_presence)
-               return hdev->ops->check_presence(hdev, target);
+       if (!hdev->ops->check_presence)
+               return 0;
+
+       return hdev->ops->check_presence(hdev, target);
+}
+
+static int hci_discover_se(struct nfc_dev *nfc_dev)
+{
+       struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
+
+       if (hdev->ops->discover_se)
+               return hdev->ops->discover_se(hdev);
+
+       return 0;
+}
+
+static int hci_enable_se(struct nfc_dev *nfc_dev, u32 se_idx)
+{
+       struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
+
+       if (hdev->ops->enable_se)
+               return hdev->ops->enable_se(hdev, se_idx);
+
+       return 0;
+}
+
+static int hci_disable_se(struct nfc_dev *nfc_dev, u32 se_idx)
+{
+       struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
+
+       if (hdev->ops->disable_se)
+               return hdev->ops->disable_se(hdev, se_idx);
 
        return 0;
 }
@@ -779,6 +809,16 @@ static void nfc_hci_recv_from_llc(struct nfc_hci_dev *hdev, struct sk_buff *skb)
        }
 }
 
+static int hci_fw_upload(struct nfc_dev *nfc_dev, const char *firmware_name)
+{
+       struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
+
+       if (!hdev->ops->fw_upload)
+               return -ENOTSUPP;
+
+       return hdev->ops->fw_upload(hdev, firmware_name);
+}
+
 static struct nfc_ops hci_nfc_ops = {
        .dev_up = hci_dev_up,
        .dev_down = hci_dev_down,
@@ -791,13 +831,16 @@ static struct nfc_ops hci_nfc_ops = {
        .im_transceive = hci_transceive,
        .tm_send = hci_tm_send,
        .check_presence = hci_check_presence,
+       .fw_upload = hci_fw_upload,
+       .discover_se = hci_discover_se,
+       .enable_se = hci_enable_se,
+       .disable_se = hci_disable_se,
 };
 
 struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops,
                                            struct nfc_hci_init_data *init_data,
                                            unsigned long quirks,
                                            u32 protocols,
-                                           u32 supported_se,
                                            const char *llc_name,
                                            int tx_headroom,
                                            int tx_tailroom,
@@ -823,7 +866,7 @@ struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops,
                return NULL;
        }
 
-       hdev->ndev = nfc_allocate_device(&hci_nfc_ops, protocols, supported_se,
+       hdev->ndev = nfc_allocate_device(&hci_nfc_ops, protocols,
                                         tx_headroom + HCI_CMDS_HEADROOM,
                                         tx_tailroom);
        if (!hdev->ndev) {
index ff8c434f7df89d5df8fd37281c06c315f35a6b18..f4d48b57ea113467807d0dd7517c4263bcd64939 100644 (file)
@@ -19,6 +19,8 @@
 
 enum llcp_state {
        LLCP_CONNECTED = 1, /* wait_for_packet() wants that */
+       LLCP_CONNECTING,
+       LLCP_DISCONNECTING,
        LLCP_CLOSED,
        LLCP_BOUND,
        LLCP_LISTEN,
@@ -246,7 +248,6 @@ struct nfc_llcp_sdp_tlv *nfc_llcp_build_sdreq_tlv(u8 tid, char *uri,
 void nfc_llcp_free_sdp_tlv(struct nfc_llcp_sdp_tlv *sdp);
 void nfc_llcp_free_sdp_tlv_list(struct hlist_head *sdp_head);
 void nfc_llcp_recv(void *data, struct sk_buff *skb, int err);
-int nfc_llcp_disconnect(struct nfc_llcp_sock *sock);
 int nfc_llcp_send_symm(struct nfc_dev *dev);
 int nfc_llcp_send_connect(struct nfc_llcp_sock *sock);
 int nfc_llcp_send_cc(struct nfc_llcp_sock *sock);
index c1b23eef83ca9180bb65cf0406e5424a8a558ada..1017894807c05a2c557cab7987450d52663156f8 100644 (file)
@@ -339,7 +339,7 @@ static struct sk_buff *llcp_allocate_pdu(struct nfc_llcp_sock *sock,
        return skb;
 }
 
-int nfc_llcp_disconnect(struct nfc_llcp_sock *sock)
+int nfc_llcp_send_disconnect(struct nfc_llcp_sock *sock)
 {
        struct sk_buff *skb;
        struct nfc_dev *dev;
@@ -630,26 +630,6 @@ int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason)
        return 0;
 }
 
-int nfc_llcp_send_disconnect(struct nfc_llcp_sock *sock)
-{
-       struct sk_buff *skb;
-       struct nfc_llcp_local *local;
-
-       pr_debug("Send DISC\n");
-
-       local = sock->local;
-       if (local == NULL)
-               return -ENODEV;
-
-       skb = llcp_allocate_pdu(sock, LLCP_PDU_DISC, 0);
-       if (skb == NULL)
-               return -ENOMEM;
-
-       skb_queue_head(&local->tx_queue, skb);
-
-       return 0;
-}
-
 int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
                          struct msghdr *msg, size_t len)
 {
index 158bdbf668ccf91fb211f49107d6b770d2fe98e4..81cd3416c7d4dfaae5805441adb6179b210374a3 100644 (file)
@@ -537,6 +537,7 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
        u8 *lto_tlv, lto_length;
        u8 *wks_tlv, wks_length;
        u8 *miux_tlv, miux_length;
+       __be16 wks = cpu_to_be16(local->local_wks);
        u8 gb_len = 0;
        int ret = 0;
 
@@ -549,8 +550,7 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
        gb_len += lto_length;
 
        pr_debug("Local wks 0x%lx\n", local->local_wks);
-       wks_tlv = nfc_llcp_build_tlv(LLCP_TLV_WKS, (u8 *)&local->local_wks, 2,
-                                    &wks_length);
+       wks_tlv = nfc_llcp_build_tlv(LLCP_TLV_WKS, (u8 *)&wks, 2, &wks_length);
        gb_len += wks_length;
 
        miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&local->miux, 0,
@@ -719,6 +719,10 @@ static void nfc_llcp_tx_work(struct work_struct *work)
                llcp_sock = nfc_llcp_sock(sk);
 
                if (llcp_sock == NULL && nfc_llcp_ptype(skb) == LLCP_PDU_I) {
+                       kfree_skb(skb);
+                       nfc_llcp_send_symm(local->dev);
+               } else if (llcp_sock && !llcp_sock->remote_ready) {
+                       skb_queue_head(&local->tx_queue, skb);
                        nfc_llcp_send_symm(local->dev);
                } else {
                        struct sk_buff *copy_skb = NULL;
@@ -730,6 +734,13 @@ static void nfc_llcp_tx_work(struct work_struct *work)
                                       DUMP_PREFIX_OFFSET, 16, 1,
                                       skb->data, skb->len, true);
 
+                       if (ptype == LLCP_PDU_DISC && sk != NULL &&
+                           sk->sk_state == LLCP_DISCONNECTING) {
+                               nfc_llcp_sock_unlink(&local->sockets, sk);
+                               sock_orphan(sk);
+                               sock_put(sk);
+                       }
+
                        if (ptype == LLCP_PDU_I)
                                copy_skb = skb_copy(skb, GFP_ATOMIC);
 
@@ -1579,6 +1590,7 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
        local->lto = 150; /* 1500 ms */
        local->rw = LLCP_MAX_RW;
        local->miux = cpu_to_be16(LLCP_MAX_MIUX);
+       local->local_wks = 0x1; /* LLC Link Management */
 
        nfc_llcp_build_gb(local);
 
index 380253eccb74a87c3ca01f98daacd79c0a9deeea..d308402b67d80c192c948aa6db05c15089cfdbe8 100644 (file)
@@ -571,7 +571,7 @@ static unsigned int llcp_sock_poll(struct file *file, struct socket *sock,
        if (sk->sk_shutdown == SHUTDOWN_MASK)
                mask |= POLLHUP;
 
-       if (sock_writeable(sk))
+       if (sock_writeable(sk) && sk->sk_state == LLCP_CONNECTED)
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
        else
                set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
@@ -603,7 +603,7 @@ static int llcp_sock_release(struct socket *sock)
 
        /* Send a DISC */
        if (sk->sk_state == LLCP_CONNECTED)
-               nfc_llcp_disconnect(llcp_sock);
+               nfc_llcp_send_disconnect(llcp_sock);
 
        if (sk->sk_state == LLCP_LISTEN) {
                struct nfc_llcp_sock *lsk, *n;
@@ -614,7 +614,7 @@ static int llcp_sock_release(struct socket *sock)
                        accept_sk = &lsk->sk;
                        lock_sock(accept_sk);
 
-                       nfc_llcp_disconnect(lsk);
+                       nfc_llcp_send_disconnect(lsk);
                        nfc_llcp_accept_unlink(accept_sk);
 
                        release_sock(accept_sk);
@@ -626,6 +626,13 @@ static int llcp_sock_release(struct socket *sock)
 
        release_sock(sk);
 
+       /* Keep this sock alive and therefore do not remove it from the sockets
+        * list until the DISC PDU has been actually sent. Otherwise we would
+        * reply with DM PDUs before sending the DISC one.
+        */
+       if (sk->sk_state == LLCP_DISCONNECTING)
+               return err;
+
        if (sock->type == SOCK_RAW)
                nfc_llcp_sock_unlink(&local->raw_sockets, sk);
        else
@@ -722,14 +729,16 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
        if (ret)
                goto sock_unlink;
 
+       sk->sk_state = LLCP_CONNECTING;
+
        ret = sock_wait_state(sk, LLCP_CONNECTED,
                              sock_sndtimeo(sk, flags & O_NONBLOCK));
-       if (ret)
+       if (ret && ret != -EINPROGRESS)
                goto sock_unlink;
 
        release_sock(sk);
 
-       return 0;
+       return ret;
 
 sock_unlink:
        nfc_llcp_put_ssap(local, llcp_sock->ssap);
index 6d69b5f0f19b0ab97c779375cf6fa7bf474fc1b5..2a2416080b4fbfcf2127a4cab2abc755b5188c06 100644 (file)
@@ -8,3 +8,13 @@ config NFC_NCI
 
          Say Y here to compile NCI support into the kernel or say M to
          compile it as module (nci).
+
+config NFC_NCI_SPI
+       depends on NFC_NCI && SPI
+       bool "NCI over SPI protocol support"
+       default n
+       help
+         NCI (NFC Controller Interface) is a communication protocol between
+         an NFC Controller (NFCC) and a Device Host (DH).
+
+         Say yes if you use an NCI driver that requires SPI link layer.
index cdb3a2e444718f20834f10230c4558bfb10c6eef..7aeedc43187db0f7fce4727f4aed15c4a6cefe63 100644 (file)
@@ -4,4 +4,6 @@
 
 obj-$(CONFIG_NFC_NCI) += nci.o
 
-nci-objs := core.o data.o lib.o ntf.o rsp.o
\ No newline at end of file
+nci-objs := core.o data.o lib.o ntf.o rsp.o
+
+nci-$(CONFIG_NFC_NCI_SPI) += spi.o
index 48ada0ec749ec369bb6fc78308f12805cfa90c69..b943d46a1644324757bd60950a07eb02cc50f7ac 100644 (file)
@@ -636,6 +636,21 @@ static int nci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
        return rc;
 }
 
+static int nci_enable_se(struct nfc_dev *nfc_dev, u32 se_idx)
+{
+       return 0;
+}
+
+static int nci_disable_se(struct nfc_dev *nfc_dev, u32 se_idx)
+{
+       return 0;
+}
+
+static int nci_discover_se(struct nfc_dev *nfc_dev)
+{
+       return 0;
+}
+
 static struct nfc_ops nci_nfc_ops = {
        .dev_up = nci_dev_up,
        .dev_down = nci_dev_down,
@@ -646,6 +661,9 @@ static struct nfc_ops nci_nfc_ops = {
        .activate_target = nci_activate_target,
        .deactivate_target = nci_deactivate_target,
        .im_transceive = nci_transceive,
+       .enable_se = nci_enable_se,
+       .disable_se = nci_disable_se,
+       .discover_se = nci_discover_se,
 };
 
 /* ---- Interface to NCI drivers ---- */
@@ -658,7 +676,6 @@ static struct nfc_ops nci_nfc_ops = {
  */
 struct nci_dev *nci_allocate_device(struct nci_ops *ops,
                                    __u32 supported_protocols,
-                                   __u32 supported_se,
                                    int tx_headroom, int tx_tailroom)
 {
        struct nci_dev *ndev;
@@ -681,7 +698,6 @@ struct nci_dev *nci_allocate_device(struct nci_ops *ops,
 
        ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops,
                                            supported_protocols,
-                                           supported_se,
                                            tx_headroom + NCI_DATA_HDR_SIZE,
                                            tx_tailroom);
        if (!ndev->nfc_dev)
@@ -797,12 +813,11 @@ EXPORT_SYMBOL(nci_unregister_device);
 /**
  * nci_recv_frame - receive frame from NCI drivers
  *
+ * @ndev: The nci device
  * @skb: The sk_buff to receive
  */
-int nci_recv_frame(struct sk_buff *skb)
+int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb)
 {
-       struct nci_dev *ndev = (struct nci_dev *) skb->dev;
-
        pr_debug("len %d\n", skb->len);
 
        if (!ndev || (!test_bit(NCI_UP, &ndev->flags) &&
@@ -819,10 +834,8 @@ int nci_recv_frame(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(nci_recv_frame);
 
-static int nci_send_frame(struct sk_buff *skb)
+static int nci_send_frame(struct nci_dev *ndev, struct sk_buff *skb)
 {
-       struct nci_dev *ndev = (struct nci_dev *) skb->dev;
-
        pr_debug("len %d\n", skb->len);
 
        if (!ndev) {
@@ -833,7 +846,7 @@ static int nci_send_frame(struct sk_buff *skb)
        /* Get rid of skb owner, prior to sending to the driver. */
        skb_orphan(skb);
 
-       return ndev->ops->send(skb);
+       return ndev->ops->send(ndev, skb);
 }
 
 /* Send NCI command */
@@ -861,8 +874,6 @@ int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload)
        if (plen)
                memcpy(skb_put(skb, plen), payload, plen);
 
-       skb->dev = (void *) ndev;
-
        skb_queue_tail(&ndev->cmd_q, skb);
        queue_work(ndev->cmd_wq, &ndev->cmd_work);
 
@@ -894,7 +905,7 @@ static void nci_tx_work(struct work_struct *work)
                         nci_conn_id(skb->data),
                         nci_plen(skb->data));
 
-               nci_send_frame(skb);
+               nci_send_frame(ndev, skb);
 
                mod_timer(&ndev->data_timer,
                          jiffies + msecs_to_jiffies(NCI_DATA_TIMEOUT));
@@ -963,7 +974,7 @@ static void nci_cmd_work(struct work_struct *work)
                         nci_opcode_oid(nci_opcode(skb->data)),
                         nci_plen(skb->data));
 
-               nci_send_frame(skb);
+               nci_send_frame(ndev, skb);
 
                mod_timer(&ndev->cmd_timer,
                          jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT));
index 76c48c5324f8fd44226d8d7b8997160dd6b062be..2a9399dd6c685566928054b16e2734984d737c94 100644 (file)
@@ -80,8 +80,6 @@ static inline void nci_push_data_hdr(struct nci_dev *ndev,
 
        nci_mt_set((__u8 *)hdr, NCI_MT_DATA_PKT);
        nci_pbf_set((__u8 *)hdr, pbf);
-
-       skb->dev = (void *) ndev;
 }
 
 static int nci_queue_tx_data_frags(struct nci_dev *ndev,
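
The core.c and data.c hunks above drop the old trick of stashing the nci_dev pointer in skb->dev: nci_recv_frame() and the driver ->send() hook now take the struct nci_dev * explicitly, and supported_se is no longer a parameter of nci_allocate_device(). A hedged driver-side sketch of the new calling convention follows; everything prefixed foo_ is hypothetical, only the nci_* calls and the nci_ops layout are taken from the patch.

#include <linux/nfc.h>
#include <net/nfc/nci_core.h>

struct foo_dev {                        /* hypothetical transport state */
        struct nci_dev *ndev;
};

static int foo_nci_open(struct nci_dev *ndev)  { return 0; }
static int foo_nci_close(struct nci_dev *ndev) { return 0; }

/* ->send() now receives the nci_dev instead of reading it from skb->dev */
static int foo_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
{
        struct foo_dev *fdev = nci_get_drvdata(ndev);

        /* a real driver would push skb->data / skb->len out through the
         * transport referenced by fdev; this sketch just consumes the skb */
        (void)fdev;
        kfree_skb(skb);
        return 0;
}

static struct nci_ops foo_nci_ops = {
        .open  = foo_nci_open,
        .close = foo_nci_close,
        .send  = foo_nci_send,
};

/* transport RX completion: deliver the frame to the NCI core explicitly */
static void foo_rx_complete(struct foo_dev *fdev, struct sk_buff *skb)
{
        nci_recv_frame(fdev->ndev, skb);
}

static int foo_setup(struct foo_dev *fdev)
{
        /* note: no supported_se argument any more */
        fdev->ndev = nci_allocate_device(&foo_nci_ops, NFC_PROTO_NFC_DEP_MASK,
                                         0 /* tx_headroom */, 0 /* tx_tailroom */);
        if (!fdev->ndev)
                return -ENOMEM;

        nci_set_drvdata(fdev->ndev, fdev);
        return nci_register_device(fdev->ndev);
}
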
diff --git a/net/nfc/nci/spi.c b/net/nfc/nci/spi.c
new file mode 100644 (file)
index 0000000..c7cf37b
--- /dev/null
@@ -0,0 +1,378 @@
+/*
+ * Copyright (C) 2013  Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#define pr_fmt(fmt) "nci_spi: %s: " fmt, __func__
+
+#include <linux/export.h>
+#include <linux/spi/spi.h>
+#include <linux/crc-ccitt.h>
+#include <linux/nfc.h>
+#include <net/nfc/nci_core.h>
+
+#define NCI_SPI_HDR_LEN                        4
+#define NCI_SPI_CRC_LEN                        2
+#define NCI_SPI_ACK_SHIFT              6
+#define NCI_SPI_MSB_PAYLOAD_MASK       0x3F
+
+#define NCI_SPI_SEND_TIMEOUT   (NCI_CMD_TIMEOUT > NCI_DATA_TIMEOUT ? \
+                                       NCI_CMD_TIMEOUT : NCI_DATA_TIMEOUT)
+
+#define NCI_SPI_DIRECT_WRITE   0x01
+#define NCI_SPI_DIRECT_READ    0x02
+
+#define ACKNOWLEDGE_NONE       0
+#define ACKNOWLEDGE_ACK                1
+#define ACKNOWLEDGE_NACK       2
+
+#define CRC_INIT               0xFFFF
+
+static int nci_spi_open(struct nci_dev *nci_dev)
+{
+       struct nci_spi_dev *ndev = nci_get_drvdata(nci_dev);
+
+       return ndev->ops->open(ndev);
+}
+
+static int nci_spi_close(struct nci_dev *nci_dev)
+{
+       struct nci_spi_dev *ndev = nci_get_drvdata(nci_dev);
+
+       return ndev->ops->close(ndev);
+}
+
+static int __nci_spi_send(struct nci_spi_dev *ndev, struct sk_buff *skb)
+{
+       struct spi_message m;
+       struct spi_transfer t;
+
+       t.tx_buf = skb->data;
+       t.len = skb->len;
+       t.cs_change = 0;
+       t.delay_usecs = ndev->xfer_udelay;
+
+       spi_message_init(&m);
+       spi_message_add_tail(&t, &m);
+
+       return spi_sync(ndev->spi, &m);
+}
+
+static int nci_spi_send(struct nci_dev *nci_dev, struct sk_buff *skb)
+{
+       struct nci_spi_dev *ndev = nci_get_drvdata(nci_dev);
+       unsigned int payload_len = skb->len;
+       unsigned char *hdr;
+       int ret;
+       long completion_rc;
+
+       ndev->ops->deassert_int(ndev);
+
+       /* add the NCI SPI header to the start of the buffer */
+       hdr = skb_push(skb, NCI_SPI_HDR_LEN);
+       hdr[0] = NCI_SPI_DIRECT_WRITE;
+       hdr[1] = ndev->acknowledge_mode;
+       hdr[2] = payload_len >> 8;
+       hdr[3] = payload_len & 0xFF;
+
+       if (ndev->acknowledge_mode == NCI_SPI_CRC_ENABLED) {
+               u16 crc;
+
+               crc = crc_ccitt(CRC_INIT, skb->data, skb->len);
+               *skb_put(skb, 1) = crc >> 8;
+               *skb_put(skb, 1) = crc & 0xFF;
+       }
+
+       ret = __nci_spi_send(ndev, skb);
+
+       kfree_skb(skb);
+       ndev->ops->assert_int(ndev);
+
+       if (ret != 0 || ndev->acknowledge_mode == NCI_SPI_CRC_DISABLED)
+               goto done;
+
+       init_completion(&ndev->req_completion);
+       completion_rc =
+               wait_for_completion_interruptible_timeout(&ndev->req_completion,
+                                                         NCI_SPI_SEND_TIMEOUT);
+
+       if (completion_rc <= 0 || ndev->req_result == ACKNOWLEDGE_NACK)
+               ret = -EIO;
+
+done:
+       return ret;
+}
+
+static struct nci_ops nci_spi_ops = {
+       .open = nci_spi_open,
+       .close = nci_spi_close,
+       .send = nci_spi_send,
+};
+
+/* ---- Interface to NCI SPI drivers ---- */
+
+/**
+ * nci_spi_allocate_device - allocate a new nci spi device
+ *
+ * @spi: SPI device
+ * @ops: device operations
+ * @supported_protocols: NFC protocols supported by the device
+ * @supported_se: NFC Secure Elements supported by the device
+ * @acknowledge_mode: Acknowledge mode used by the device
+ * @delay: delay between transactions in us
+ */
+struct nci_spi_dev *nci_spi_allocate_device(struct spi_device *spi,
+                                               struct nci_spi_ops *ops,
+                                               u32 supported_protocols,
+                                               u32 supported_se,
+                                               u8 acknowledge_mode,
+                                               unsigned int delay)
+{
+       struct nci_spi_dev *ndev;
+       int tailroom = 0;
+
+       if (!ops->open || !ops->close || !ops->assert_int || !ops->deassert_int)
+               return NULL;
+
+       if (!supported_protocols)
+               return NULL;
+
+       ndev = devm_kzalloc(&spi->dev, sizeof(struct nci_dev), GFP_KERNEL);
+       if (!ndev)
+               return NULL;
+
+       ndev->ops = ops;
+       ndev->acknowledge_mode = acknowledge_mode;
+       ndev->xfer_udelay = delay;
+
+       if (acknowledge_mode == NCI_SPI_CRC_ENABLED)
+               tailroom += NCI_SPI_CRC_LEN;
+
+       ndev->nci_dev = nci_allocate_device(&nci_spi_ops, supported_protocols,
+                                           NCI_SPI_HDR_LEN, tailroom);
+       if (!ndev->nci_dev)
+               return NULL;
+
+       nci_set_drvdata(ndev->nci_dev, ndev);
+
+       return ndev;
+}
+EXPORT_SYMBOL_GPL(nci_spi_allocate_device);
+
+/**
+ * nci_spi_free_device - deallocate nci spi device
+ *
+ * @ndev: The nci spi device to deallocate
+ */
+void nci_spi_free_device(struct nci_spi_dev *ndev)
+{
+       nci_free_device(ndev->nci_dev);
+}
+EXPORT_SYMBOL_GPL(nci_spi_free_device);
+
+/**
+ * nci_spi_register_device - register a nci spi device in the nfc subsystem
+ *
+ * @ndev: The nci spi device to register
+ */
+int nci_spi_register_device(struct nci_spi_dev *ndev)
+{
+       return nci_register_device(ndev->nci_dev);
+}
+EXPORT_SYMBOL_GPL(nci_spi_register_device);
+
+/**
+ * nci_spi_unregister_device - unregister a nci spi device in the nfc subsystem
+ *
+ * @ndev: The nci spi device to unregister
+ */
+void nci_spi_unregister_device(struct nci_spi_dev *ndev)
+{
+       nci_unregister_device(ndev->nci_dev);
+}
+EXPORT_SYMBOL_GPL(nci_spi_unregister_device);
+
+static int send_acknowledge(struct nci_spi_dev *ndev, u8 acknowledge)
+{
+       struct sk_buff *skb;
+       unsigned char *hdr;
+       u16 crc;
+       int ret;
+
+       skb = nci_skb_alloc(ndev->nci_dev, 0, GFP_KERNEL);
+
+       /* add the NCI SPI header to the start of the buffer */
+       hdr = skb_push(skb, NCI_SPI_HDR_LEN);
+       hdr[0] = NCI_SPI_DIRECT_WRITE;
+       hdr[1] = NCI_SPI_CRC_ENABLED;
+       hdr[2] = acknowledge << NCI_SPI_ACK_SHIFT;
+       hdr[3] = 0;
+
+       crc = crc_ccitt(CRC_INIT, skb->data, skb->len);
+       *skb_put(skb, 1) = crc >> 8;
+       *skb_put(skb, 1) = crc & 0xFF;
+
+       ret = __nci_spi_send(ndev, skb);
+
+       kfree_skb(skb);
+
+       return ret;
+}
+
+static struct sk_buff *__nci_spi_recv_frame(struct nci_spi_dev *ndev)
+{
+       struct sk_buff *skb;
+       struct spi_message m;
+       unsigned char req[2], resp_hdr[2];
+       struct spi_transfer tx, rx;
+       unsigned short rx_len = 0;
+       int ret;
+
+       spi_message_init(&m);
+       req[0] = NCI_SPI_DIRECT_READ;
+       req[1] = ndev->acknowledge_mode;
+       tx.tx_buf = req;
+       tx.len = 2;
+       tx.cs_change = 0;
+       spi_message_add_tail(&tx, &m);
+       rx.rx_buf = resp_hdr;
+       rx.len = 2;
+       rx.cs_change = 1;
+       spi_message_add_tail(&rx, &m);
+       ret = spi_sync(ndev->spi, &m);
+
+       if (ret)
+               return NULL;
+
+       if (ndev->acknowledge_mode == NCI_SPI_CRC_ENABLED)
+               rx_len = ((resp_hdr[0] & NCI_SPI_MSB_PAYLOAD_MASK) << 8) +
+                               resp_hdr[1] + NCI_SPI_CRC_LEN;
+       else
+               rx_len = (resp_hdr[0] << 8) | resp_hdr[1];
+
+       skb = nci_skb_alloc(ndev->nci_dev, rx_len, GFP_KERNEL);
+       if (!skb)
+               return NULL;
+
+       spi_message_init(&m);
+       rx.rx_buf = skb_put(skb, rx_len);
+       rx.len = rx_len;
+       rx.cs_change = 0;
+       rx.delay_usecs = ndev->xfer_udelay;
+       spi_message_add_tail(&rx, &m);
+       ret = spi_sync(ndev->spi, &m);
+
+       if (ret)
+               goto receive_error;
+
+       if (ndev->acknowledge_mode == NCI_SPI_CRC_ENABLED) {
+               *skb_push(skb, 1) = resp_hdr[1];
+               *skb_push(skb, 1) = resp_hdr[0];
+       }
+
+       return skb;
+
+receive_error:
+       kfree_skb(skb);
+
+       return NULL;
+}
+
+static int nci_spi_check_crc(struct sk_buff *skb)
+{
+       u16 crc_data = (skb->data[skb->len - 2] << 8) |
+                       skb->data[skb->len - 1];
+       int ret;
+
+       ret = (crc_ccitt(CRC_INIT, skb->data, skb->len - NCI_SPI_CRC_LEN)
+                       == crc_data);
+
+       skb_trim(skb, skb->len - NCI_SPI_CRC_LEN);
+
+       return ret;
+}
+
+static u8 nci_spi_get_ack(struct sk_buff *skb)
+{
+       u8 ret;
+
+       ret = skb->data[0] >> NCI_SPI_ACK_SHIFT;
+
+       /* Remove NFCC part of the header: ACK, NACK and MSB payload len */
+       skb_pull(skb, 2);
+
+       return ret;
+}
+
+/**
+ * nci_spi_recv_frame - receive frame from NCI SPI drivers
+ *
+ * @ndev: The nci spi device
+ * Context: can sleep
+ *
+ * This call may only be used from a context that may sleep.  The sleep
+ * is non-interruptible, and has no timeout.
+ *
+ * It returns zero on success, else a negative error code.
+ */
+int nci_spi_recv_frame(struct nci_spi_dev *ndev)
+{
+       struct sk_buff *skb;
+       int ret = 0;
+
+       ndev->ops->deassert_int(ndev);
+
+       /* Retrieve frame from SPI */
+       skb = __nci_spi_recv_frame(ndev);
+       if (!skb) {
+               ret = -EIO;
+               goto done;
+       }
+
+       if (ndev->acknowledge_mode == NCI_SPI_CRC_ENABLED) {
+               if (!nci_spi_check_crc(skb)) {
+                       send_acknowledge(ndev, ACKNOWLEDGE_NACK);
+                       goto done;
+               }
+
+               /* In case of acknowledged mode: if ACK or NACK received,
+                * unblock completion of latest frame sent.
+                */
+               ndev->req_result = nci_spi_get_ack(skb);
+               if (ndev->req_result)
+                       complete(&ndev->req_completion);
+       }
+
+       /* If there is no payload (ACK/NACK only frame),
+        * free the socket buffer
+        */
+       if (skb->len == 0) {
+               kfree_skb(skb);
+               goto done;
+       }
+
+       if (ndev->acknowledge_mode == NCI_SPI_CRC_ENABLED)
+               send_acknowledge(ndev, ACKNOWLEDGE_ACK);
+
+       /* Forward skb to NCI core layer */
+       ret = nci_recv_frame(ndev->nci_dev, skb);
+
+done:
+       ndev->ops->assert_int(ndev);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(nci_spi_recv_frame);
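
For chip drivers, the new layer boils down to providing nci_spi_ops (open/close plus the interrupt line hooks the code above invokes around every send and receive), allocating and registering an nci_spi_dev, and calling nci_spi_recv_frame() whenever the controller raises its interrupt. The sketch below shows that wiring for a hypothetical foo chip; the nci_spi_* calls and NCI_SPI_CRC_ENABLED come from the patch, the ops prototypes are inferred from how they are used above, and everything else is a placeholder.

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/nfc.h>
#include <linux/spi/spi.h>
#include <net/nfc/nci_core.h>

static int foo_open(struct nci_spi_dev *nsdev)  { return 0; }
static int foo_close(struct nci_spi_dev *nsdev) { return 0; }

/* driven by the core around every SPI transfer to handshake with the chip */
static void foo_assert_int(struct nci_spi_dev *nsdev)   { }
static void foo_deassert_int(struct nci_spi_dev *nsdev) { }

static struct nci_spi_ops foo_spi_ops = {
        .open         = foo_open,
        .close        = foo_close,
        .assert_int   = foo_assert_int,
        .deassert_int = foo_deassert_int,
};

static irqreturn_t foo_irq_thread(int irq, void *data)
{
        struct nci_spi_dev *nsdev = data;

        /* reads the frame over SPI, checks CRC, sends ACK/NACK and forwards
         * the payload to the NCI core */
        nci_spi_recv_frame(nsdev);

        return IRQ_HANDLED;
}

static int foo_spi_probe(struct spi_device *spi)
{
        struct nci_spi_dev *nsdev;
        int err;

        nsdev = nci_spi_allocate_device(spi, &foo_spi_ops,
                                        NFC_PROTO_ISO14443_MASK,
                                        0 /* supported_se */,
                                        NCI_SPI_CRC_ENABLED,
                                        20 /* us between transfers */);
        if (!nsdev)
                return -ENOMEM;

        err = request_threaded_irq(spi->irq, NULL, foo_irq_thread,
                                   IRQF_ONESHOT, "foo_nfc", nsdev);
        if (err)
                return err;

        return nci_spi_register_device(nsdev);
}
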
index f0c4d61f37c0f623df3eb40a1222a87f9c875f69..b05ad909778fe5978677f3f81a31360220f98eb7 100644 (file)
@@ -56,6 +56,8 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
        [NFC_ATTR_LLC_PARAM_RW] = { .type = NLA_U8 },
        [NFC_ATTR_LLC_PARAM_MIUX] = { .type = NLA_U16 },
        [NFC_ATTR_LLC_SDP] = { .type = NLA_NESTED },
+       [NFC_ATTR_FIRMWARE_NAME] = { .type = NLA_STRING,
+                                    .len = NFC_FIRMWARE_NAME_MAXSIZE },
 };
 
 static const struct nla_policy nfc_sdp_genl_policy[NFC_SDP_ATTR_MAX + 1] = {
@@ -424,6 +426,69 @@ free_msg:
        return rc;
 }
 
+int nfc_genl_se_added(struct nfc_dev *dev, u32 se_idx, u16 type)
+{
+       struct sk_buff *msg;
+       void *hdr;
+
+       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!msg)
+               return -ENOMEM;
+
+       hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
+                         NFC_EVENT_SE_ADDED);
+       if (!hdr)
+               goto free_msg;
+
+       if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) ||
+           nla_put_u32(msg, NFC_ATTR_SE_INDEX, se_idx) ||
+           nla_put_u8(msg, NFC_ATTR_SE_TYPE, type))
+               goto nla_put_failure;
+
+       genlmsg_end(msg, hdr);
+
+       genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL);
+
+       return 0;
+
+nla_put_failure:
+       genlmsg_cancel(msg, hdr);
+free_msg:
+       nlmsg_free(msg);
+       return -EMSGSIZE;
+}
+
+int nfc_genl_se_removed(struct nfc_dev *dev, u32 se_idx)
+{
+       struct sk_buff *msg;
+       void *hdr;
+
+       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!msg)
+               return -ENOMEM;
+
+       hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
+                         NFC_EVENT_SE_REMOVED);
+       if (!hdr)
+               goto free_msg;
+
+       if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) ||
+           nla_put_u32(msg, NFC_ATTR_SE_INDEX, se_idx))
+               goto nla_put_failure;
+
+       genlmsg_end(msg, hdr);
+
+       genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL);
+
+       return 0;
+
+nla_put_failure:
+       genlmsg_cancel(msg, hdr);
+free_msg:
+       nlmsg_free(msg);
+       return -EMSGSIZE;
+}
+
 static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev,
                                u32 portid, u32 seq,
                                struct netlink_callback *cb,
@@ -442,7 +507,6 @@ static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev,
        if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) ||
            nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) ||
            nla_put_u32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols) ||
-           nla_put_u32(msg, NFC_ATTR_SE, dev->supported_se) ||
            nla_put_u8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up) ||
            nla_put_u8(msg, NFC_ATTR_RF_MODE, dev->rf_mode))
                goto nla_put_failure;
@@ -1025,6 +1089,108 @@ exit:
        return rc;
 }
 
+static int nfc_genl_fw_upload(struct sk_buff *skb, struct genl_info *info)
+{
+       struct nfc_dev *dev;
+       int rc;
+       u32 idx;
+       char firmware_name[NFC_FIRMWARE_NAME_MAXSIZE + 1];
+
+       if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
+               return -EINVAL;
+
+       idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
+
+       dev = nfc_get_device(idx);
+       if (!dev)
+               return -ENODEV;
+
+       nla_strlcpy(firmware_name, info->attrs[NFC_ATTR_FIRMWARE_NAME],
+                   sizeof(firmware_name));
+
+       rc = nfc_fw_upload(dev, firmware_name);
+
+       nfc_put_device(dev);
+       return rc;
+}
+
+int nfc_genl_fw_upload_done(struct nfc_dev *dev, const char *firmware_name)
+{
+       struct sk_buff *msg;
+       void *hdr;
+
+       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!msg)
+               return -ENOMEM;
+
+       hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
+                         NFC_CMD_FW_UPLOAD);
+       if (!hdr)
+               goto free_msg;
+
+       if (nla_put_string(msg, NFC_ATTR_FIRMWARE_NAME, firmware_name) ||
+           nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
+               goto nla_put_failure;
+
+       genlmsg_end(msg, hdr);
+
+       genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL);
+
+       return 0;
+
+nla_put_failure:
+       genlmsg_cancel(msg, hdr);
+free_msg:
+       nlmsg_free(msg);
+       return -EMSGSIZE;
+}
+
+static int nfc_genl_enable_se(struct sk_buff *skb, struct genl_info *info)
+{
+       struct nfc_dev *dev;
+       int rc;
+       u32 idx, se_idx;
+
+       if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
+           !info->attrs[NFC_ATTR_SE_INDEX])
+               return -EINVAL;
+
+       idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
+       se_idx = nla_get_u32(info->attrs[NFC_ATTR_SE_INDEX]);
+
+       dev = nfc_get_device(idx);
+       if (!dev)
+               return -ENODEV;
+
+       rc = nfc_enable_se(dev, se_idx);
+
+       nfc_put_device(dev);
+       return rc;
+}
+
+static int nfc_genl_disable_se(struct sk_buff *skb, struct genl_info *info)
+{
+       struct nfc_dev *dev;
+       int rc;
+       u32 idx, se_idx;
+
+       if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
+           !info->attrs[NFC_ATTR_SE_INDEX])
+               return -EINVAL;
+
+       idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
+       se_idx = nla_get_u32(info->attrs[NFC_ATTR_SE_INDEX]);
+
+       dev = nfc_get_device(idx);
+       if (!dev)
+               return -ENODEV;
+
+       rc = nfc_disable_se(dev, se_idx);
+
+       nfc_put_device(dev);
+       return rc;
+}
+
 static struct genl_ops nfc_genl_ops[] = {
        {
                .cmd = NFC_CMD_GET_DEVICE,
@@ -1084,6 +1250,21 @@ static struct genl_ops nfc_genl_ops[] = {
                .doit = nfc_genl_llc_sdreq,
                .policy = nfc_genl_policy,
        },
+       {
+               .cmd = NFC_CMD_FW_UPLOAD,
+               .doit = nfc_genl_fw_upload,
+               .policy = nfc_genl_policy,
+       },
+       {
+               .cmd = NFC_CMD_ENABLE_SE,
+               .doit = nfc_genl_enable_se,
+               .policy = nfc_genl_policy,
+       },
+       {
+               .cmd = NFC_CMD_DISABLE_SE,
+               .doit = nfc_genl_disable_se,
+               .policy = nfc_genl_policy,
+       },
 };
 
 
index afa1f84ba0406c6bb7622043302a2a6d9bb04703..ee85a1fc1b2443d88982ca6985574936b0359292 100644 (file)
@@ -94,6 +94,9 @@ int nfc_genl_tm_deactivated(struct nfc_dev *dev);
 
 int nfc_genl_llc_send_sdres(struct nfc_dev *dev, struct hlist_head *sdres_list);
 
+int nfc_genl_se_added(struct nfc_dev *dev, u32 se_idx, u16 type);
+int nfc_genl_se_removed(struct nfc_dev *dev, u32 se_idx);
+
 struct nfc_dev *nfc_get_device(unsigned int idx);
 
 static inline void nfc_put_device(struct nfc_dev *dev)
@@ -120,6 +123,11 @@ static inline void nfc_device_iter_exit(struct class_dev_iter *iter)
        class_dev_iter_exit(iter);
 }
 
+int nfc_fw_upload(struct nfc_dev *dev, const char *firmware_name);
+int nfc_genl_fw_upload_done(struct nfc_dev *dev, const char *firmware_name);
+
+int nfc_fw_upload_done(struct nfc_dev *dev, const char *firmware_name);
+
 int nfc_dev_up(struct nfc_dev *dev);
 
 int nfc_dev_down(struct nfc_dev *dev);
@@ -139,4 +147,7 @@ int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx);
 int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb,
                      data_exchange_cb_t cb, void *cb_context);
 
+int nfc_enable_se(struct nfc_dev *dev, u32 se_idx);
+int nfc_disable_se(struct nfc_dev *dev, u32 se_idx);
+
 #endif /* __LOCAL_NFC_H */
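
The netlink additions above expose firmware upload and secure element control to userspace through three new generic netlink commands (NFC_CMD_FW_UPLOAD, NFC_CMD_ENABLE_SE, NFC_CMD_DISABLE_SE) keyed by NFC_ATTR_DEVICE_INDEX, NFC_ATTR_SE_INDEX and NFC_ATTR_FIRMWARE_NAME, plus NFC_EVENT_SE_ADDED/SE_REMOVED multicast events. A hedged libnl-3 sketch that enables SE 0 on device 0 follows; the command and attribute names come from this series' uapi additions, the rest is ordinary genetlink plumbing.

#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nfc.h>

int nfc_enable_se0(void)
{
        struct nl_sock *sk;
        struct nl_msg *msg;
        int family, err = -1;

        sk = nl_socket_alloc();
        if (!sk)
                return -1;
        if (genl_connect(sk) < 0)
                goto out;

        family = genl_ctrl_resolve(sk, NFC_GENL_NAME);
        if (family < 0)
                goto out;

        msg = nlmsg_alloc();
        if (!msg)
                goto out;

        genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
                    NFC_CMD_ENABLE_SE, NFC_GENL_VERSION);
        nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, 0);
        nla_put_u32(msg, NFC_ATTR_SE_INDEX, 0);

        err = nl_send_auto(sk, msg);
        if (err >= 0)
                err = nl_wait_for_ack(sk);      /* kernel doit returns 0 or -errno */
        nlmsg_free(msg);
out:
        nl_socket_free(sk);
        return err < 0 ? -1 : 0;
}
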
index b416093997dafcf528204be9c99a9986dfe7965d..6b94633ca61d67e29faa58a7d42410e1ef678193 100644 (file)
@@ -2412,7 +2412,7 @@ static const unsigned char nargs[21] = {
 
 SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
 {
-       unsigned long a[6];
+       unsigned long a[AUDITSC_ARGS];
        unsigned long a0, a1;
        int err;
        unsigned int len;
@@ -2428,7 +2428,9 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
        if (copy_from_user(a, args, len))
                return -EFAULT;
 
-       audit_socketcall(nargs[call] / sizeof(unsigned long), a);
+       err = audit_socketcall(nargs[call] / sizeof(unsigned long), a);
+       if (err)
+               return err;
 
        a0 = a[0];
        a1 = a[1];
index 5c4c61d527e2dbc1752e1d2ddbfc04417e4fc286..357f613df7ff49d3460e37d3f338cb89c7029a69 100644 (file)
 #include <linux/sunrpc/svcauth.h>
 #include "gss_rpc_xdr.h"
 
-static bool gssx_check_pointer(struct xdr_stream *xdr)
-{
-       __be32 *p;
-
-       p = xdr_reserve_space(xdr, 4);
-       if (unlikely(p == NULL))
-               return -ENOSPC;
-       return *p?true:false;
-}
-
 static int gssx_enc_bool(struct xdr_stream *xdr, int v)
 {
        __be32 *p;
@@ -264,25 +254,27 @@ static int gssx_dec_option_array(struct xdr_stream *xdr,
        if (unlikely(p == NULL))
                return -ENOSPC;
        count = be32_to_cpup(p++);
-       if (count != 0) {
-               /* we recognize only 1 currently: CREDS_VALUE */
-               oa->count = 1;
+       if (!count)
+               return 0;
 
-               oa->data = kmalloc(sizeof(struct gssx_option), GFP_KERNEL);
-               if (!oa->data)
-                       return -ENOMEM;
+       /* we recognize only 1 currently: CREDS_VALUE */
+       oa->count = 1;
 
-               creds = kmalloc(sizeof(struct svc_cred), GFP_KERNEL);
-               if (!creds) {
-                       kfree(oa->data);
-                       return -ENOMEM;
-               }
+       oa->data = kmalloc(sizeof(struct gssx_option), GFP_KERNEL);
+       if (!oa->data)
+               return -ENOMEM;
 
-               oa->data[0].option.data = CREDS_VALUE;
-               oa->data[0].option.len = sizeof(CREDS_VALUE);
-               oa->data[0].value.data = (void *)creds;
-               oa->data[0].value.len = 0;
+       creds = kmalloc(sizeof(struct svc_cred), GFP_KERNEL);
+       if (!creds) {
+               kfree(oa->data);
+               return -ENOMEM;
        }
+
+       oa->data[0].option.data = CREDS_VALUE;
+       oa->data[0].option.len = sizeof(CREDS_VALUE);
+       oa->data[0].value.data = (void *)creds;
+       oa->data[0].value.len = 0;
+
        for (i = 0; i < count; i++) {
                gssx_buffer dummy = { 0, NULL };
                u32 length;
@@ -800,6 +792,7 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
                                struct xdr_stream *xdr,
                                struct gssx_res_accept_sec_context *res)
 {
+       u32 value_follows;
        int err;
 
        /* res->status */
@@ -808,7 +801,10 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
                return err;
 
        /* res->context_handle */
-       if (gssx_check_pointer(xdr)) {
+       err = gssx_dec_bool(xdr, &value_follows);
+       if (err)
+               return err;
+       if (value_follows) {
                err = gssx_dec_ctx(xdr, res->context_handle);
                if (err)
                        return err;
@@ -817,7 +813,10 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
        }
 
        /* res->output_token */
-       if (gssx_check_pointer(xdr)) {
+       err = gssx_dec_bool(xdr, &value_follows);
+       if (err)
+               return err;
+       if (value_follows) {
                err = gssx_dec_buffer(xdr, res->output_token);
                if (err)
                        return err;
@@ -826,7 +825,10 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
        }
 
        /* res->delegated_cred_handle */
-       if (gssx_check_pointer(xdr)) {
+       err = gssx_dec_bool(xdr, &value_follows);
+       if (err)
+               return err;
+       if (value_follows) {
                /* we do not support upcall servers sending this data. */
                return -EINVAL;
        }
index fd556ac05fdbc5c48c365005a0e7deddef79a0ac..50f6195c8b70b816ab35ade4075c5f71de105b35 100644 (file)
@@ -54,6 +54,8 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef)
        control_freq = chandef->chan->center_freq;
 
        switch (chandef->width) {
+       case NL80211_CHAN_WIDTH_5:
+       case NL80211_CHAN_WIDTH_10:
        case NL80211_CHAN_WIDTH_20:
        case NL80211_CHAN_WIDTH_20_NOHT:
                if (chandef->center_freq1 != control_freq)
@@ -152,6 +154,12 @@ static int cfg80211_chandef_get_width(const struct cfg80211_chan_def *c)
        int width;
 
        switch (c->width) {
+       case NL80211_CHAN_WIDTH_5:
+               width = 5;
+               break;
+       case NL80211_CHAN_WIDTH_10:
+               width = 10;
+               break;
        case NL80211_CHAN_WIDTH_20:
        case NL80211_CHAN_WIDTH_20_NOHT:
                width = 20;
@@ -194,6 +202,16 @@ cfg80211_chandef_compatible(const struct cfg80211_chan_def *c1,
        if (c1->width == c2->width)
                return NULL;
 
+       /*
+        * can't be compatible if one of them is 5 or 10 MHz,
+        * but they don't have the same width.
+        */
+       if (c1->width == NL80211_CHAN_WIDTH_5 ||
+           c1->width == NL80211_CHAN_WIDTH_10 ||
+           c2->width == NL80211_CHAN_WIDTH_5 ||
+           c2->width == NL80211_CHAN_WIDTH_10)
+               return NULL;
+
        if (c1->width == NL80211_CHAN_WIDTH_20_NOHT ||
            c1->width == NL80211_CHAN_WIDTH_20)
                return c2;
@@ -264,11 +282,17 @@ static int cfg80211_get_chans_dfs_required(struct wiphy *wiphy,
                                            u32 bandwidth)
 {
        struct ieee80211_channel *c;
-       u32 freq;
+       u32 freq, start_freq, end_freq;
+
+       if (bandwidth <= 20) {
+               start_freq = center_freq;
+               end_freq = center_freq;
+       } else {
+               start_freq = center_freq - bandwidth/2 + 10;
+               end_freq = center_freq + bandwidth/2 - 10;
+       }
 
-       for (freq = center_freq - bandwidth/2 + 10;
-            freq <= center_freq + bandwidth/2 - 10;
-            freq += 20) {
+       for (freq = start_freq; freq <= end_freq; freq += 20) {
                c = ieee80211_get_channel(wiphy, freq);
                if (!c)
                        return -EINVAL;
@@ -310,11 +334,17 @@ static bool cfg80211_secondary_chans_ok(struct wiphy *wiphy,
                                        u32 prohibited_flags)
 {
        struct ieee80211_channel *c;
-       u32 freq;
+       u32 freq, start_freq, end_freq;
+
+       if (bandwidth <= 20) {
+               start_freq = center_freq;
+               end_freq = center_freq;
+       } else {
+               start_freq = center_freq - bandwidth/2 + 10;
+               end_freq = center_freq + bandwidth/2 - 10;
+       }
 
-       for (freq = center_freq - bandwidth/2 + 10;
-            freq <= center_freq + bandwidth/2 - 10;
-            freq += 20) {
+       for (freq = start_freq; freq <= end_freq; freq += 20) {
                c = ieee80211_get_channel(wiphy, freq);
                if (!c)
                        return false;
@@ -349,6 +379,12 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
        control_freq = chandef->chan->center_freq;
 
        switch (chandef->width) {
+       case NL80211_CHAN_WIDTH_5:
+               width = 5;
+               break;
+       case NL80211_CHAN_WIDTH_10:
+               width = 10;
+               break;
        case NL80211_CHAN_WIDTH_20:
                if (!ht_cap->ht_supported)
                        return false;
@@ -405,6 +441,11 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
        if (width > 20)
                prohibited_flags |= IEEE80211_CHAN_NO_OFDM;
 
+       /* 5 and 10 MHz are only defined for the OFDM PHY */
+       if (width < 20)
+               prohibited_flags |= IEEE80211_CHAN_NO_OFDM;
+
+
        if (!cfg80211_secondary_chans_ok(wiphy, chandef->center_freq1,
                                         width, prohibited_flags))
                return false;
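
With the chan.c changes above, 5 and 10 MHz channel definitions are accepted but only in their narrow form: center_freq1 has to equal the control channel frequency, such a chandef is never reported as compatible with any other width, and since these widths are OFDM only, channels flagged IEEE80211_CHAN_NO_OFDM are rejected for them as well. A minimal sketch of building a valid 10 MHz chandef, assuming the caller already resolved the ieee80211_channel (e.g. via ieee80211_get_channel()):

#include <net/cfg80211.h>

static bool foo_build_10mhz_chandef(struct ieee80211_channel *chan,
                                    struct cfg80211_chan_def *chandef)
{
        chandef->chan         = chan;
        chandef->width        = NL80211_CHAN_WIDTH_10;
        chandef->center_freq1 = chan->center_freq;  /* must equal the control freq */
        chandef->center_freq2 = 0;

        return cfg80211_chandef_valid(chandef);
}
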
index 73405e00c800fd62ff703c9c030e31da70c69858..672459b9483b48ac19184bc223cd80c6a8b75cf9 100644 (file)
 MODULE_AUTHOR("Johannes Berg");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("wireless configuration support");
+MODULE_ALIAS_GENL_FAMILY(NL80211_GENL_NAME);
 
-/* RCU-protected (and cfg80211_mutex for writers) */
+/* RCU-protected (and RTNL for writers) */
 LIST_HEAD(cfg80211_rdev_list);
 int cfg80211_rdev_list_generation;
 
-DEFINE_MUTEX(cfg80211_mutex);
-
 /* for debugfs */
 static struct dentry *ieee80211_debugfs_dir;
 
@@ -52,12 +51,11 @@ module_param(cfg80211_disable_40mhz_24ghz, bool, 0644);
 MODULE_PARM_DESC(cfg80211_disable_40mhz_24ghz,
                 "Disable 40MHz support in the 2.4GHz band");
 
-/* requires cfg80211_mutex to be held! */
 struct cfg80211_registered_device *cfg80211_rdev_by_wiphy_idx(int wiphy_idx)
 {
        struct cfg80211_registered_device *result = NULL, *rdev;
 
-       assert_cfg80211_lock();
+       ASSERT_RTNL();
 
        list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
                if (rdev->wiphy_idx == wiphy_idx) {
@@ -76,12 +74,11 @@ int get_wiphy_idx(struct wiphy *wiphy)
        return rdev->wiphy_idx;
 }
 
-/* requires cfg80211_rdev_mutex to be held! */
 struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx)
 {
        struct cfg80211_registered_device *rdev;
 
-       assert_cfg80211_lock();
+       ASSERT_RTNL();
 
        rdev = cfg80211_rdev_by_wiphy_idx(wiphy_idx);
        if (!rdev)
@@ -89,35 +86,13 @@ struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx)
        return &rdev->wiphy;
 }
 
-struct cfg80211_registered_device *
-cfg80211_get_dev_from_ifindex(struct net *net, int ifindex)
-{
-       struct cfg80211_registered_device *rdev = ERR_PTR(-ENODEV);
-       struct net_device *dev;
-
-       mutex_lock(&cfg80211_mutex);
-       dev = dev_get_by_index(net, ifindex);
-       if (!dev)
-               goto out;
-       if (dev->ieee80211_ptr) {
-               rdev = wiphy_to_dev(dev->ieee80211_ptr->wiphy);
-               mutex_lock(&rdev->mtx);
-       } else
-               rdev = ERR_PTR(-ENODEV);
-       dev_put(dev);
- out:
-       mutex_unlock(&cfg80211_mutex);
-       return rdev;
-}
-
-/* requires cfg80211_mutex to be held */
 int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
                        char *newname)
 {
        struct cfg80211_registered_device *rdev2;
        int wiphy_idx, taken = -1, result, digits;
 
-       assert_cfg80211_lock();
+       ASSERT_RTNL();
 
        /* prohibit calling the thing phy%d when %d is not its number */
        sscanf(newname, PHY_NAME "%d%n", &wiphy_idx, &taken);
@@ -215,8 +190,7 @@ static void cfg80211_rfkill_poll(struct rfkill *rfkill, void *data)
 void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
                              struct wireless_dev *wdev)
 {
-       lockdep_assert_held(&rdev->devlist_mtx);
-       lockdep_assert_held(&rdev->sched_scan_mtx);
+       ASSERT_RTNL();
 
        if (WARN_ON(wdev->iftype != NL80211_IFTYPE_P2P_DEVICE))
                return;
@@ -230,18 +204,15 @@ void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
        rdev->opencount--;
 
        if (rdev->scan_req && rdev->scan_req->wdev == wdev) {
-               bool busy = work_busy(&rdev->scan_done_wk);
-
                /*
-                * If the work isn't pending or running (in which case it would
-                * be waiting for the lock we hold) the driver didn't properly
-                * cancel the scan when the interface was removed. In this case
-                * warn and leak the scan request object to not crash later.
+                * If the scan request wasn't notified as done, set it
+                * to aborted and leak it after a warning. The driver
+                * should have notified us that it ended at the latest
+                * during rdev_stop_p2p_device().
                 */
-               WARN_ON(!busy);
-
-               rdev->scan_req->aborted = true;
-               ___cfg80211_scan_done(rdev, !busy);
+               if (WARN_ON(!rdev->scan_req->notified))
+                       rdev->scan_req->aborted = true;
+               ___cfg80211_scan_done(rdev, !rdev->scan_req->notified);
        }
 }
 
@@ -255,8 +226,6 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked)
 
        rtnl_lock();
 
-       /* read-only iteration need not hold the devlist_mtx */
-
        list_for_each_entry(wdev, &rdev->wdev_list, list) {
                if (wdev->netdev) {
                        dev_close(wdev->netdev);
@@ -265,12 +234,7 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked)
                /* otherwise, check iftype */
                switch (wdev->iftype) {
                case NL80211_IFTYPE_P2P_DEVICE:
-                       /* but this requires it */
-                       mutex_lock(&rdev->devlist_mtx);
-                       mutex_lock(&rdev->sched_scan_mtx);
                        cfg80211_stop_p2p_device(rdev, wdev);
-                       mutex_unlock(&rdev->sched_scan_mtx);
-                       mutex_unlock(&rdev->devlist_mtx);
                        break;
                default:
                        break;
@@ -298,10 +262,7 @@ static void cfg80211_event_work(struct work_struct *work)
                            event_work);
 
        rtnl_lock();
-       cfg80211_lock_rdev(rdev);
-
        cfg80211_process_rdev_events(rdev);
-       cfg80211_unlock_rdev(rdev);
        rtnl_unlock();
 }
 
@@ -309,7 +270,7 @@ static void cfg80211_event_work(struct work_struct *work)
 
 struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
 {
-       static int wiphy_counter;
+       static atomic_t wiphy_counter = ATOMIC_INIT(0);
 
        struct cfg80211_registered_device *rdev;
        int alloc_size;
@@ -331,26 +292,21 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
 
        rdev->ops = ops;
 
-       mutex_lock(&cfg80211_mutex);
-
-       rdev->wiphy_idx = wiphy_counter++;
+       rdev->wiphy_idx = atomic_inc_return(&wiphy_counter);
 
        if (unlikely(rdev->wiphy_idx < 0)) {
-               wiphy_counter--;
-               mutex_unlock(&cfg80211_mutex);
                /* ugh, wrapped! */
+               atomic_dec(&wiphy_counter);
                kfree(rdev);
                return NULL;
        }
 
-       mutex_unlock(&cfg80211_mutex);
+       /* atomic_inc_return makes it start at 1, make it start at 0 */
+       rdev->wiphy_idx--;
 
        /* give it a proper name */
        dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx);
 
-       mutex_init(&rdev->mtx);
-       mutex_init(&rdev->devlist_mtx);
-       mutex_init(&rdev->sched_scan_mtx);
        INIT_LIST_HEAD(&rdev->wdev_list);
        INIT_LIST_HEAD(&rdev->beacon_registrations);
        spin_lock_init(&rdev->beacon_registrations_lock);
@@ -496,8 +452,13 @@ int wiphy_register(struct wiphy *wiphy)
        u16 ifmodes = wiphy->interface_modes;
 
 #ifdef CONFIG_PM
-       if (WARN_ON((wiphy->wowlan.flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) &&
-                   !(wiphy->wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY)))
+       if (WARN_ON(wiphy->wowlan &&
+                   (wiphy->wowlan->flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) &&
+                   !(wiphy->wowlan->flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY)))
+               return -EINVAL;
+       if (WARN_ON(wiphy->wowlan &&
+                   !wiphy->wowlan->flags && !wiphy->wowlan->n_patterns &&
+                   !wiphy->wowlan->tcp))
                return -EINVAL;
 #endif
 
@@ -587,25 +548,28 @@ int wiphy_register(struct wiphy *wiphy)
        }
 
 #ifdef CONFIG_PM
-       if (rdev->wiphy.wowlan.n_patterns) {
-               if (WARN_ON(!rdev->wiphy.wowlan.pattern_min_len ||
-                           rdev->wiphy.wowlan.pattern_min_len >
-                           rdev->wiphy.wowlan.pattern_max_len))
-                       return -EINVAL;
-       }
+       if (WARN_ON(rdev->wiphy.wowlan && rdev->wiphy.wowlan->n_patterns &&
+                   (!rdev->wiphy.wowlan->pattern_min_len ||
+                    rdev->wiphy.wowlan->pattern_min_len >
+                               rdev->wiphy.wowlan->pattern_max_len)))
+               return -EINVAL;
 #endif
 
        /* check and set up bitrates */
        ieee80211_set_bitrate_flags(wiphy);
 
-       mutex_lock(&cfg80211_mutex);
 
        res = device_add(&rdev->wiphy.dev);
+       if (res)
+               return res;
+
+       res = rfkill_register(rdev->rfkill);
        if (res) {
-               mutex_unlock(&cfg80211_mutex);
+               device_del(&rdev->wiphy.dev);
                return res;
        }
 
+       rtnl_lock();
        /* set up regulatory info */
        wiphy_regulatory_register(wiphy);
 
@@ -631,25 +595,7 @@ int wiphy_register(struct wiphy *wiphy)
        }
 
        cfg80211_debugfs_rdev_add(rdev);
-       mutex_unlock(&cfg80211_mutex);
-
-       /*
-        * due to a locking dependency this has to be outside of the
-        * cfg80211_mutex lock
-        */
-       res = rfkill_register(rdev->rfkill);
-       if (res) {
-               device_del(&rdev->wiphy.dev);
-
-               mutex_lock(&cfg80211_mutex);
-               debugfs_remove_recursive(rdev->wiphy.debugfsdir);
-               list_del_rcu(&rdev->list);
-               wiphy_regulatory_deregister(wiphy);
-               mutex_unlock(&cfg80211_mutex);
-               return res;
-       }
 
-       rtnl_lock();
        rdev->wiphy.registered = true;
        rtnl_unlock();
        return 0;
@@ -679,25 +625,19 @@ void wiphy_unregister(struct wiphy *wiphy)
 {
        struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 
-       rtnl_lock();
-       rdev->wiphy.registered = false;
-       rtnl_unlock();
-
-       rfkill_unregister(rdev->rfkill);
-
-       /* protect the device list */
-       mutex_lock(&cfg80211_mutex);
-
        wait_event(rdev->dev_wait, ({
                int __count;
-               mutex_lock(&rdev->devlist_mtx);
+               rtnl_lock();
                __count = rdev->opencount;
-               mutex_unlock(&rdev->devlist_mtx);
+               rtnl_unlock();
                __count == 0; }));
 
-       mutex_lock(&rdev->devlist_mtx);
+       rfkill_unregister(rdev->rfkill);
+
+       rtnl_lock();
+       rdev->wiphy.registered = false;
+
        BUG_ON(!list_empty(&rdev->wdev_list));
-       mutex_unlock(&rdev->devlist_mtx);
 
        /*
         * First remove the hardware from everywhere, this makes
@@ -707,20 +647,6 @@ void wiphy_unregister(struct wiphy *wiphy)
        list_del_rcu(&rdev->list);
        synchronize_rcu();
 
-       /*
-        * Try to grab rdev->mtx. If a command is still in progress,
-        * hopefully the driver will refuse it since it's tearing
-        * down the device already. We wait for this command to complete
-        * before unlinking the item from the list.
-        * Note: as codified by the BUG_ON above we cannot get here if
-        * a virtual interface is still present. Hence, we can only get
-        * to lock contention here if userspace issues a command that
-        * identified the hardware by wiphy index.
-        */
-       cfg80211_lock_rdev(rdev);
-       /* nothing */
-       cfg80211_unlock_rdev(rdev);
-
        /*
         * If this device got a regulatory hint tell core its
         * free to listen now to a new shiny device regulatory hint
@@ -730,15 +656,17 @@ void wiphy_unregister(struct wiphy *wiphy)
        cfg80211_rdev_list_generation++;
        device_del(&rdev->wiphy.dev);
 
-       mutex_unlock(&cfg80211_mutex);
+       rtnl_unlock();
 
        flush_work(&rdev->scan_done_wk);
        cancel_work_sync(&rdev->conn_work);
        flush_work(&rdev->event_work);
        cancel_delayed_work_sync(&rdev->dfs_update_channels_wk);
 
-       if (rdev->wowlan && rdev->ops->set_wakeup)
+#ifdef CONFIG_PM
+       if (rdev->wiphy.wowlan_config && rdev->ops->set_wakeup)
                rdev_set_wakeup(rdev, false);
+#endif
        cfg80211_rdev_free_wowlan(rdev);
 }
 EXPORT_SYMBOL(wiphy_unregister);
@@ -748,9 +676,6 @@ void cfg80211_dev_free(struct cfg80211_registered_device *rdev)
        struct cfg80211_internal_bss *scan, *tmp;
        struct cfg80211_beacon_registration *reg, *treg;
        rfkill_destroy(rdev->rfkill);
-       mutex_destroy(&rdev->mtx);
-       mutex_destroy(&rdev->devlist_mtx);
-       mutex_destroy(&rdev->sched_scan_mtx);
        list_for_each_entry_safe(reg, treg, &rdev->beacon_registrations, list) {
                list_del(&reg->list);
                kfree(reg);
@@ -775,36 +700,6 @@ void wiphy_rfkill_set_hw_state(struct wiphy *wiphy, bool blocked)
 }
 EXPORT_SYMBOL(wiphy_rfkill_set_hw_state);
 
-static void wdev_cleanup_work(struct work_struct *work)
-{
-       struct wireless_dev *wdev;
-       struct cfg80211_registered_device *rdev;
-
-       wdev = container_of(work, struct wireless_dev, cleanup_work);
-       rdev = wiphy_to_dev(wdev->wiphy);
-
-       mutex_lock(&rdev->sched_scan_mtx);
-
-       if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) {
-               rdev->scan_req->aborted = true;
-               ___cfg80211_scan_done(rdev, true);
-       }
-
-       if (WARN_ON(rdev->sched_scan_req &&
-                   rdev->sched_scan_req->dev == wdev->netdev)) {
-               __cfg80211_stop_sched_scan(rdev, false);
-       }
-
-       mutex_unlock(&rdev->sched_scan_mtx);
-
-       mutex_lock(&rdev->devlist_mtx);
-       rdev->opencount--;
-       mutex_unlock(&rdev->devlist_mtx);
-       wake_up(&rdev->dev_wait);
-
-       dev_put(wdev->netdev);
-}
-
 void cfg80211_unregister_wdev(struct wireless_dev *wdev)
 {
        struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -814,8 +709,6 @@ void cfg80211_unregister_wdev(struct wireless_dev *wdev)
        if (WARN_ON(wdev->netdev))
                return;
 
-       mutex_lock(&rdev->devlist_mtx);
-       mutex_lock(&rdev->sched_scan_mtx);
        list_del_rcu(&wdev->list);
        rdev->devlist_generation++;
 
@@ -827,8 +720,6 @@ void cfg80211_unregister_wdev(struct wireless_dev *wdev)
                WARN_ON_ONCE(1);
                break;
        }
-       mutex_unlock(&rdev->sched_scan_mtx);
-       mutex_unlock(&rdev->devlist_mtx);
 }
 EXPORT_SYMBOL(cfg80211_unregister_wdev);
 
@@ -847,7 +738,7 @@ void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev,
 }
 
 void cfg80211_leave(struct cfg80211_registered_device *rdev,
-                  struct wireless_dev *wdev)
+                   struct wireless_dev *wdev)
 {
        struct net_device *dev = wdev->netdev;
 
@@ -857,9 +748,7 @@ void cfg80211_leave(struct cfg80211_registered_device *rdev,
                break;
        case NL80211_IFTYPE_P2P_CLIENT:
        case NL80211_IFTYPE_STATION:
-               mutex_lock(&rdev->sched_scan_mtx);
                __cfg80211_stop_sched_scan(rdev, false);
-               mutex_unlock(&rdev->sched_scan_mtx);
 
                wdev_lock(wdev);
 #ifdef CONFIG_CFG80211_WEXT
@@ -868,8 +757,8 @@ void cfg80211_leave(struct cfg80211_registered_device *rdev,
                wdev->wext.ie_len = 0;
                wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC;
 #endif
-               __cfg80211_disconnect(rdev, dev,
-                                     WLAN_REASON_DEAUTH_LEAVING, true);
+               cfg80211_disconnect(rdev, dev,
+                                   WLAN_REASON_DEAUTH_LEAVING, true);
                wdev_unlock(wdev);
                break;
        case NL80211_IFTYPE_MESH_POINT:
@@ -912,13 +801,11 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
                 * are added with nl80211.
                 */
                mutex_init(&wdev->mtx);
-               INIT_WORK(&wdev->cleanup_work, wdev_cleanup_work);
                INIT_LIST_HEAD(&wdev->event_list);
                spin_lock_init(&wdev->event_lock);
                INIT_LIST_HEAD(&wdev->mgmt_registrations);
                spin_lock_init(&wdev->mgmt_registrations_lock);
 
-               mutex_lock(&rdev->devlist_mtx);
                wdev->identifier = ++rdev->wdev_id;
                list_add_rcu(&wdev->list, &rdev->wdev_list);
                rdev->devlist_generation++;
@@ -930,8 +817,6 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
                        pr_err("failed to add phy80211 symlink to netdev!\n");
                }
                wdev->netdev = dev;
-               wdev->sme_state = CFG80211_SME_IDLE;
-               mutex_unlock(&rdev->devlist_mtx);
 #ifdef CONFIG_CFG80211_WEXT
                wdev->wext.default_key = -1;
                wdev->wext.default_mgmt_key = -1;
@@ -957,26 +842,22 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
                break;
        case NETDEV_DOWN:
                cfg80211_update_iface_num(rdev, wdev->iftype, -1);
-               dev_hold(dev);
-               queue_work(cfg80211_wq, &wdev->cleanup_work);
+               if (rdev->scan_req && rdev->scan_req->wdev == wdev) {
+                       if (WARN_ON(!rdev->scan_req->notified))
+                               rdev->scan_req->aborted = true;
+                       ___cfg80211_scan_done(rdev, true);
+               }
+
+               if (WARN_ON(rdev->sched_scan_req &&
+                           rdev->sched_scan_req->dev == wdev->netdev)) {
+                       __cfg80211_stop_sched_scan(rdev, false);
+               }
+
+               rdev->opencount--;
+               wake_up(&rdev->dev_wait);
                break;
        case NETDEV_UP:
-               /*
-                * If we have a really quick DOWN/UP succession we may
-                * have this work still pending ... cancel it and see
-                * if it was pending, in which case we need to account
-                * for some of the work it would have done.
-                */
-               if (cancel_work_sync(&wdev->cleanup_work)) {
-                       mutex_lock(&rdev->devlist_mtx);
-                       rdev->opencount--;
-                       mutex_unlock(&rdev->devlist_mtx);
-                       dev_put(dev);
-               }
                cfg80211_update_iface_num(rdev, wdev->iftype, 1);
-               cfg80211_lock_rdev(rdev);
-               mutex_lock(&rdev->devlist_mtx);
-               mutex_lock(&rdev->sched_scan_mtx);
                wdev_lock(wdev);
                switch (wdev->iftype) {
 #ifdef CONFIG_CFG80211_WEXT
@@ -1008,10 +889,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
                        break;
                }
                wdev_unlock(wdev);
-               mutex_unlock(&rdev->sched_scan_mtx);
                rdev->opencount++;
-               mutex_unlock(&rdev->devlist_mtx);
-               cfg80211_unlock_rdev(rdev);
 
                /*
                 * Configure power management to the driver here so that its
@@ -1027,12 +905,6 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
                        }
                break;
        case NETDEV_UNREGISTER:
-               /*
-                * NB: cannot take rdev->mtx here because this may be
-                * called within code protected by it when interfaces
-                * are removed with nl80211.
-                */
-               mutex_lock(&rdev->devlist_mtx);
                /*
                 * It is possible to get NETDEV_UNREGISTER
                 * multiple times. To detect that, check
@@ -1049,7 +921,6 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
                        kfree(wdev->wext.keys);
 #endif
                }
-               mutex_unlock(&rdev->devlist_mtx);
                /*
                 * synchronise (so that we won't find this netdev
                 * from other code any more) and then clear the list
@@ -1063,15 +934,19 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
                 * freed.
                 */
                cfg80211_process_wdev_events(wdev);
+
+               if (WARN_ON(wdev->current_bss)) {
+                       cfg80211_unhold_bss(wdev->current_bss);
+                       cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
+                       wdev->current_bss = NULL;
+               }
                break;
        case NETDEV_PRE_UP:
                if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)))
                        return notifier_from_errno(-EOPNOTSUPP);
                if (rfkill_blocked(rdev->rfkill))
                        return notifier_from_errno(-ERFKILL);
-               mutex_lock(&rdev->devlist_mtx);
                ret = cfg80211_can_add_interface(rdev, wdev->iftype);
-               mutex_unlock(&rdev->devlist_mtx);
                if (ret)
                        return notifier_from_errno(ret);
                break;
@@ -1089,12 +964,10 @@ static void __net_exit cfg80211_pernet_exit(struct net *net)
        struct cfg80211_registered_device *rdev;
 
        rtnl_lock();
-       mutex_lock(&cfg80211_mutex);
        list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
                if (net_eq(wiphy_net(&rdev->wiphy), net))
                        WARN_ON(cfg80211_switch_netns(rdev, &init_net));
        }
-       mutex_unlock(&cfg80211_mutex);
        rtnl_unlock();
 }
 
index fd35dae547c446cbae459e0ea8c3f679320b952c..a6b45bf00f3357292a1ad6e3cc5a08fd6faf9a7a 100644 (file)
@@ -5,7 +5,6 @@
  */
 #ifndef __NET_WIRELESS_CORE_H
 #define __NET_WIRELESS_CORE_H
-#include <linux/mutex.h>
 #include <linux/list.h>
 #include <linux/netdevice.h>
 #include <linux/rbtree.h>
 struct cfg80211_registered_device {
        const struct cfg80211_ops *ops;
        struct list_head list;
-       /* we hold this mutex during any call so that
-        * we cannot do multiple calls at once, and also
-        * to avoid the deregister call to proceed while
-        * any call is in progress */
-       struct mutex mtx;
 
        /* rfkill support */
        struct rfkill_ops rfkill_ops;
@@ -49,9 +43,7 @@ struct cfg80211_registered_device {
        /* wiphy index, internal only */
        int wiphy_idx;
 
-       /* associated wireless interfaces */
-       struct mutex devlist_mtx;
-       /* protected by devlist_mtx or RCU */
+       /* associated wireless interfaces, protected by rtnl or RCU */
        struct list_head wdev_list;
        int devlist_generation, wdev_id;
        int opencount; /* also protected by devlist_mtx */
@@ -75,8 +67,6 @@ struct cfg80211_registered_device {
        struct work_struct scan_done_wk;
        struct work_struct sched_scan_results_wk;
 
-       struct mutex sched_scan_mtx;
-
 #ifdef CONFIG_NL80211_TESTMODE
        struct genl_info *testmode_info;
 #endif
@@ -84,8 +74,6 @@ struct cfg80211_registered_device {
        struct work_struct conn_work;
        struct work_struct event_work;
 
-       struct cfg80211_wowlan *wowlan;
-
        struct delayed_work dfs_update_channels_wk;
 
        /* netlink port which started critical protocol (0 means not started) */
@@ -106,29 +94,26 @@ struct cfg80211_registered_device *wiphy_to_dev(struct wiphy *wiphy)
 static inline void
 cfg80211_rdev_free_wowlan(struct cfg80211_registered_device *rdev)
 {
+#ifdef CONFIG_PM
        int i;
 
-       if (!rdev->wowlan)
+       if (!rdev->wiphy.wowlan_config)
                return;
-       for (i = 0; i < rdev->wowlan->n_patterns; i++)
-               kfree(rdev->wowlan->patterns[i].mask);
-       kfree(rdev->wowlan->patterns);
-       if (rdev->wowlan->tcp && rdev->wowlan->tcp->sock)
-               sock_release(rdev->wowlan->tcp->sock);
-       kfree(rdev->wowlan->tcp);
-       kfree(rdev->wowlan);
+       for (i = 0; i < rdev->wiphy.wowlan_config->n_patterns; i++)
+               kfree(rdev->wiphy.wowlan_config->patterns[i].mask);
+       kfree(rdev->wiphy.wowlan_config->patterns);
+       if (rdev->wiphy.wowlan_config->tcp &&
+           rdev->wiphy.wowlan_config->tcp->sock)
+               sock_release(rdev->wiphy.wowlan_config->tcp->sock);
+       kfree(rdev->wiphy.wowlan_config->tcp);
+       kfree(rdev->wiphy.wowlan_config);
+#endif
 }
 
 extern struct workqueue_struct *cfg80211_wq;
-extern struct mutex cfg80211_mutex;
 extern struct list_head cfg80211_rdev_list;
 extern int cfg80211_rdev_list_generation;
 
-static inline void assert_cfg80211_lock(void)
-{
-       lockdep_assert_held(&cfg80211_mutex);
-}
-
 struct cfg80211_internal_bss {
        struct list_head list;
        struct list_head hidden_list;
@@ -161,27 +146,11 @@ static inline void cfg80211_unhold_bss(struct cfg80211_internal_bss *bss)
 struct cfg80211_registered_device *cfg80211_rdev_by_wiphy_idx(int wiphy_idx);
 int get_wiphy_idx(struct wiphy *wiphy);
 
-/* requires cfg80211_rdev_mutex to be held! */
 struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx);
 
-/* identical to cfg80211_get_dev_from_info but only operate on ifindex */
-extern struct cfg80211_registered_device *
-cfg80211_get_dev_from_ifindex(struct net *net, int ifindex);
-
 int cfg80211_switch_netns(struct cfg80211_registered_device *rdev,
                          struct net *net);
 
-static inline void cfg80211_lock_rdev(struct cfg80211_registered_device *rdev)
-{
-       mutex_lock(&rdev->mtx);
-}
-
-static inline void cfg80211_unlock_rdev(struct cfg80211_registered_device *rdev)
-{
-       BUG_ON(IS_ERR(rdev) || !rdev);
-       mutex_unlock(&rdev->mtx);
-}
-
 static inline void wdev_lock(struct wireless_dev *wdev)
        __acquires(wdev)
 {
@@ -196,7 +165,7 @@ static inline void wdev_unlock(struct wireless_dev *wdev)
        mutex_unlock(&wdev->mtx);
 }
 
-#define ASSERT_RDEV_LOCK(rdev) lockdep_assert_held(&(rdev)->mtx)
+#define ASSERT_RDEV_LOCK(rdev) ASSERT_RTNL()
 #define ASSERT_WDEV_LOCK(wdev) lockdep_assert_held(&(wdev)->mtx)
 
 static inline bool cfg80211_has_monitors_only(struct cfg80211_registered_device *rdev)
@@ -314,38 +283,21 @@ int cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
                     struct net_device *dev);
 
 /* MLME */
-int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
-                        struct net_device *dev,
-                        struct ieee80211_channel *chan,
-                        enum nl80211_auth_type auth_type,
-                        const u8 *bssid,
-                        const u8 *ssid, int ssid_len,
-                        const u8 *ie, int ie_len,
-                        const u8 *key, int key_len, int key_idx,
-                        const u8 *sae_data, int sae_data_len);
 int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
-                      struct net_device *dev, struct ieee80211_channel *chan,
-                      enum nl80211_auth_type auth_type, const u8 *bssid,
+                      struct net_device *dev,
+                      struct ieee80211_channel *chan,
+                      enum nl80211_auth_type auth_type,
+                      const u8 *bssid,
                       const u8 *ssid, int ssid_len,
                       const u8 *ie, int ie_len,
                       const u8 *key, int key_len, int key_idx,
                       const u8 *sae_data, int sae_data_len);
-int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
-                         struct net_device *dev,
-                         struct ieee80211_channel *chan,
-                         const u8 *bssid,
-                         const u8 *ssid, int ssid_len,
-                         struct cfg80211_assoc_request *req);
 int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
                        struct net_device *dev,
                        struct ieee80211_channel *chan,
                        const u8 *bssid,
                        const u8 *ssid, int ssid_len,
                        struct cfg80211_assoc_request *req);
-int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
-                          struct net_device *dev, const u8 *bssid,
-                          const u8 *ie, int ie_len, u16 reason,
-                          bool local_state_change);
 int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
                         struct net_device *dev, const u8 *bssid,
                         const u8 *ie, int ie_len, u16 reason,
@@ -356,11 +308,6 @@ int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
                           bool local_state_change);
 void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
                        struct net_device *dev);
-void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
-                              const u8 *req_ie, size_t req_ie_len,
-                              const u8 *resp_ie, size_t resp_ie_len,
-                              u16 status, bool wextev,
-                              struct cfg80211_bss *bss);
 int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_pid,
                                u16 frame_type, const u8 *match_data,
                                int match_len);
@@ -376,19 +323,19 @@ void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa,
 void cfg80211_oper_and_vht_capa(struct ieee80211_vht_cap *vht_capa,
                                const struct ieee80211_vht_cap *vht_capa_mask);
 
-/* SME */
-int __cfg80211_connect(struct cfg80211_registered_device *rdev,
-                      struct net_device *dev,
-                      struct cfg80211_connect_params *connect,
-                      struct cfg80211_cached_keys *connkeys,
-                      const u8 *prev_bssid);
+/* SME events */
 int cfg80211_connect(struct cfg80211_registered_device *rdev,
                     struct net_device *dev,
                     struct cfg80211_connect_params *connect,
-                    struct cfg80211_cached_keys *connkeys);
-int __cfg80211_disconnect(struct cfg80211_registered_device *rdev,
-                         struct net_device *dev, u16 reason,
-                         bool wextev);
+                    struct cfg80211_cached_keys *connkeys,
+                    const u8 *prev_bssid);
+void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
+                              const u8 *req_ie, size_t req_ie_len,
+                              const u8 *resp_ie, size_t resp_ie_len,
+                              u16 status, bool wextev,
+                              struct cfg80211_bss *bss);
+void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
+                            size_t ie_len, u16 reason, bool from_ap);
 int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
                        struct net_device *dev, u16 reason,
                        bool wextev);
@@ -399,21 +346,21 @@ void __cfg80211_roamed(struct wireless_dev *wdev,
 int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev,
                              struct wireless_dev *wdev);
 
+/* SME implementation */
 void cfg80211_conn_work(struct work_struct *work);
-void cfg80211_sme_failed_assoc(struct wireless_dev *wdev);
-bool cfg80211_sme_failed_reassoc(struct wireless_dev *wdev);
+void cfg80211_sme_scan_done(struct net_device *dev);
+bool cfg80211_sme_rx_assoc_resp(struct wireless_dev *wdev, u16 status);
+void cfg80211_sme_rx_auth(struct wireless_dev *wdev, const u8 *buf, size_t len);
+void cfg80211_sme_disassoc(struct wireless_dev *wdev);
+void cfg80211_sme_deauth(struct wireless_dev *wdev);
+void cfg80211_sme_auth_timeout(struct wireless_dev *wdev);
+void cfg80211_sme_assoc_timeout(struct wireless_dev *wdev);
 
 /* internal helpers */
 bool cfg80211_supported_cipher_suite(struct wiphy *wiphy, u32 cipher);
 int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
                                   struct key_params *params, int key_idx,
                                   bool pairwise, const u8 *mac_addr);
-void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
-                            size_t ie_len, u16 reason, bool from_ap);
-void cfg80211_sme_scan_done(struct net_device *dev);
-void cfg80211_sme_rx_auth(struct net_device *dev, const u8 *buf, size_t len);
-void cfg80211_sme_disassoc(struct net_device *dev,
-                          struct cfg80211_internal_bss *bss);
 void __cfg80211_scan_done(struct work_struct *wk);
 void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak);
 void __cfg80211_sched_scan_results(struct work_struct *wk);
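
Taken together, these core.h hunks retire rdev->mtx, devlist_mtx and sched_scan_mtx in favour of the RTNL: the lock/unlock helpers disappear and ASSERT_RDEV_LOCK() now expands to ASSERT_RTNL(). A minimal sketch of what a former cfg80211_lock_rdev()/cfg80211_unlock_rdev() section reduces to (hypothetical helper, shown only to illustrate the new contract):

        /* hedged sketch: the RTNL is the only per-rdev serialization left */
        static int read_generation_sketch(struct cfg80211_registered_device *rdev)
        {
                ASSERT_RTNL();
                ASSERT_RDEV_LOCK(rdev);         /* same assertion after this patch */

                return rdev->devlist_generation;
        }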
index 920cabe0461b30cd7c5ef9d59e0f010a3bfa4297..90d0500366248900f2aebcac8d57f295d229d764 100644 (file)
@@ -74,7 +74,7 @@ static ssize_t ht40allow_map_read(struct file *file,
        if (!buf)
                return -ENOMEM;
 
-       mutex_lock(&cfg80211_mutex);
+       rtnl_lock();
 
        for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
                sband = wiphy->bands[band];
@@ -85,7 +85,7 @@ static ssize_t ht40allow_map_read(struct file *file,
                                                buf, buf_size, offset);
        }
 
-       mutex_unlock(&cfg80211_mutex);
+       rtnl_unlock();
 
        r = simple_read_from_buffer(user_buf, count, ppos, buf, offset);
 
index d80e47194d49ee7f9ead039b2fae2bc0fbcdb13a..39bff7d367687fbdd4d220d76e34d66e4e586a9c 100644 (file)
@@ -43,7 +43,6 @@ void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid)
        cfg80211_hold_bss(bss_from_pub(bss));
        wdev->current_bss = bss_from_pub(bss);
 
-       wdev->sme_state = CFG80211_SME_CONNECTED;
        cfg80211_upload_connect_keys(wdev);
 
        nl80211_send_ibss_bssid(wiphy_to_dev(wdev->wiphy), dev, bssid,
@@ -64,8 +63,6 @@ void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp)
 
        trace_cfg80211_ibss_joined(dev, bssid);
 
-       CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTING);
-
        ev = kzalloc(sizeof(*ev), gfp);
        if (!ev)
                return;
@@ -120,7 +117,6 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
 #ifdef CONFIG_CFG80211_WEXT
        wdev->wext.ibss.chandef = params->chandef;
 #endif
-       wdev->sme_state = CFG80211_SME_CONNECTING;
 
        err = cfg80211_can_use_chan(rdev, wdev, params->chandef.chan,
                                    params->channel_fixed
@@ -134,7 +130,6 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
        err = rdev_join_ibss(rdev, dev, params);
        if (err) {
                wdev->connect_keys = NULL;
-               wdev->sme_state = CFG80211_SME_IDLE;
                return err;
        }
 
@@ -152,11 +147,11 @@ int cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        int err;
 
-       mutex_lock(&rdev->devlist_mtx);
+       ASSERT_RTNL();
+
        wdev_lock(wdev);
        err = __cfg80211_join_ibss(rdev, dev, params, connkeys);
        wdev_unlock(wdev);
-       mutex_unlock(&rdev->devlist_mtx);
 
        return err;
 }
@@ -186,7 +181,6 @@ static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext)
        }
 
        wdev->current_bss = NULL;
-       wdev->sme_state = CFG80211_SME_IDLE;
        wdev->ssid_len = 0;
 #ifdef CONFIG_CFG80211_WEXT
        if (!nowext)
@@ -359,11 +353,9 @@ int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
                wdev->wext.ibss.channel_fixed = false;
        }
 
-       mutex_lock(&rdev->devlist_mtx);
        wdev_lock(wdev);
        err = cfg80211_ibss_wext_join(rdev, wdev);
        wdev_unlock(wdev);
-       mutex_unlock(&rdev->devlist_mtx);
 
        return err;
 }
@@ -429,11 +421,9 @@ int cfg80211_ibss_wext_siwessid(struct net_device *dev,
        memcpy(wdev->wext.ibss.ssid, ssid, len);
        wdev->wext.ibss.ssid_len = len;
 
-       mutex_lock(&rdev->devlist_mtx);
        wdev_lock(wdev);
        err = cfg80211_ibss_wext_join(rdev, wdev);
        wdev_unlock(wdev);
-       mutex_unlock(&rdev->devlist_mtx);
 
        return err;
 }
@@ -512,11 +502,9 @@ int cfg80211_ibss_wext_siwap(struct net_device *dev,
        } else
                wdev->wext.ibss.bssid = NULL;
 
-       mutex_lock(&rdev->devlist_mtx);
        wdev_lock(wdev);
        err = cfg80211_ibss_wext_join(rdev, wdev);
        wdev_unlock(wdev);
-       mutex_unlock(&rdev->devlist_mtx);
 
        return err;
 }
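
With wdev->sme_state removed from the IBSS paths above, "joined" is now derived solely from wdev->current_bss under the wdev mutex. A small sketch of that check (illustrative helper, not part of the patch):

        /* hedged sketch: connection state collapses to a current_bss test */
        static bool ibss_joined_sketch(struct wireless_dev *wdev)
        {
                bool joined;

                wdev_lock(wdev);
                joined = wdev->current_bss != NULL;
                wdev_unlock(wdev);

                return joined;
        }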
index 0bb93f3061a4353db1da049a384aa0cd2f9d10ab..30c49202ee4d12804b78bf4cc26e2766c4cc029e 100644 (file)
@@ -18,6 +18,7 @@
 #define MESH_PATH_TO_ROOT_TIMEOUT      6000
 #define MESH_ROOT_INTERVAL     5000
 #define MESH_ROOT_CONFIRMATION_INTERVAL 2000
+#define MESH_DEFAULT_PLINK_TIMEOUT     1800 /* timeout in seconds */
 
 /*
  * Minimum interval between two consecutive PREQs originated by the same
@@ -75,6 +76,7 @@ const struct mesh_config default_mesh_config = {
        .dot11MeshHWMPconfirmationInterval = MESH_ROOT_CONFIRMATION_INTERVAL,
        .power_mode = NL80211_MESH_POWER_ACTIVE,
        .dot11MeshAwakeWindowDuration = MESH_DEFAULT_AWAKE_WINDOW,
+       .plink_timeout = MESH_DEFAULT_PLINK_TIMEOUT,
 };
 
 const struct mesh_setup default_mesh_setup = {
@@ -82,6 +84,7 @@ const struct mesh_setup default_mesh_setup = {
        .sync_method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET,
        .path_sel_proto = IEEE80211_PATH_PROTOCOL_HWMP,
        .path_metric = IEEE80211_PATH_METRIC_AIRTIME,
+       .auth_id = 0, /* open */
        .ie = NULL,
        .ie_len = 0,
        .is_secure = false,
@@ -159,6 +162,16 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
                setup->chandef.center_freq1 = setup->chandef.chan->center_freq;
        }
 
+       /*
+        * check if basic rates are available otherwise use mandatory rates as
+        * basic rates
+        */
+       if (!setup->basic_rates) {
+               struct ieee80211_supported_band *sband =
+                               rdev->wiphy.bands[setup->chandef.chan->band];
+               setup->basic_rates = ieee80211_mandatory_rates(sband);
+       }
+
        if (!cfg80211_reg_can_beacon(&rdev->wiphy, &setup->chandef))
                return -EINVAL;
 
@@ -185,11 +198,9 @@ int cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        int err;
 
-       mutex_lock(&rdev->devlist_mtx);
        wdev_lock(wdev);
        err = __cfg80211_join_mesh(rdev, dev, setup, conf);
        wdev_unlock(wdev);
-       mutex_unlock(&rdev->devlist_mtx);
 
        return err;
 }
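
The join-mesh hunk above falls back to the band's mandatory rates when userspace supplied no basic rates. A compact sketch of that selection, using only symbols visible in the hunk (the helper name is made up):

        /* hedged sketch: explicit basic rates win, else the mandatory set */
        static u32 mesh_basic_rates_sketch(struct cfg80211_registered_device *rdev,
                                           struct mesh_setup *setup)
        {
                struct ieee80211_supported_band *sband =
                        rdev->wiphy.bands[setup->chandef.chan->band];

                return setup->basic_rates ?: ieee80211_mandatory_rates(sband);
        }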
index 0c7b7dd855f650f6bb9f6ba4e8ecbc479e8637d1..bfac5e186f579f8b6d526de5d100a77083e45696 100644 (file)
 #include "rdev-ops.h"
 
 
-void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len)
-{
-       struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
-
-       trace_cfg80211_send_rx_auth(dev);
-       wdev_lock(wdev);
-
-       nl80211_send_rx_auth(rdev, dev, buf, len, GFP_KERNEL);
-       cfg80211_sme_rx_auth(dev, buf, len);
-
-       wdev_unlock(wdev);
-}
-EXPORT_SYMBOL(cfg80211_send_rx_auth);
-
-void cfg80211_send_rx_assoc(struct net_device *dev, struct cfg80211_bss *bss,
+void cfg80211_rx_assoc_resp(struct net_device *dev, struct cfg80211_bss *bss,
                            const u8 *buf, size_t len)
 {
-       u16 status_code;
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct wiphy *wiphy = wdev->wiphy;
        struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
        struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
        u8 *ie = mgmt->u.assoc_resp.variable;
        int ieoffs = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable);
+       u16 status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
 
        trace_cfg80211_send_rx_assoc(dev, bss);
-       wdev_lock(wdev);
-
-       status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
 
        /*
         * This is a bit of a hack, we don't notify userspace of
@@ -56,174 +37,135 @@ void cfg80211_send_rx_assoc(struct net_device *dev, struct cfg80211_bss *bss,
         * and got a reject -- we only try again with an assoc
         * frame instead of reassoc.
         */
-       if (status_code != WLAN_STATUS_SUCCESS && wdev->conn &&
-           cfg80211_sme_failed_reassoc(wdev)) {
+       if (cfg80211_sme_rx_assoc_resp(wdev, status_code)) {
+               cfg80211_unhold_bss(bss_from_pub(bss));
                cfg80211_put_bss(wiphy, bss);
-               goto out;
+               return;
        }
 
        nl80211_send_rx_assoc(rdev, dev, buf, len, GFP_KERNEL);
-
-       if (status_code != WLAN_STATUS_SUCCESS && wdev->conn) {
-               cfg80211_sme_failed_assoc(wdev);
-               /*
-                * do not call connect_result() now because the
-                * sme will schedule work that does it later.
-                */
-               cfg80211_put_bss(wiphy, bss);
-               goto out;
-       }
-
-       if (!wdev->conn && wdev->sme_state == CFG80211_SME_IDLE) {
-               /*
-                * This is for the userspace SME, the CONNECTING
-                * state will be changed to CONNECTED by
-                * __cfg80211_connect_result() below.
-                */
-               wdev->sme_state = CFG80211_SME_CONNECTING;
-       }
-
-       /* this consumes the bss reference */
+       /* update current_bss etc., consumes the bss reference */
        __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, ie, len - ieoffs,
                                  status_code,
                                  status_code == WLAN_STATUS_SUCCESS, bss);
- out:
-       wdev_unlock(wdev);
 }
-EXPORT_SYMBOL(cfg80211_send_rx_assoc);
+EXPORT_SYMBOL(cfg80211_rx_assoc_resp);
 
-void __cfg80211_send_deauth(struct net_device *dev,
-                                  const u8 *buf, size_t len)
+static void cfg80211_process_auth(struct wireless_dev *wdev,
+                                 const u8 *buf, size_t len)
 {
-       struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
-       struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
-       const u8 *bssid = mgmt->bssid;
-       bool was_current = false;
+       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
 
-       trace___cfg80211_send_deauth(dev);
-       ASSERT_WDEV_LOCK(wdev);
-
-       if (wdev->current_bss &&
-           ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) {
-               cfg80211_unhold_bss(wdev->current_bss);
-               cfg80211_put_bss(wiphy, &wdev->current_bss->pub);
-               wdev->current_bss = NULL;
-               was_current = true;
-       }
+       nl80211_send_rx_auth(rdev, wdev->netdev, buf, len, GFP_KERNEL);
+       cfg80211_sme_rx_auth(wdev, buf, len);
+}
 
-       nl80211_send_deauth(rdev, dev, buf, len, GFP_KERNEL);
+static void cfg80211_process_deauth(struct wireless_dev *wdev,
+                                   const u8 *buf, size_t len)
+{
+       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
+       const u8 *bssid = mgmt->bssid;
+       u16 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
+       bool from_ap = !ether_addr_equal(mgmt->sa, wdev->netdev->dev_addr);
 
-       if (wdev->sme_state == CFG80211_SME_CONNECTED && was_current) {
-               u16 reason_code;
-               bool from_ap;
+       nl80211_send_deauth(rdev, wdev->netdev, buf, len, GFP_KERNEL);
 
-               reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
+       if (!wdev->current_bss ||
+           !ether_addr_equal(wdev->current_bss->pub.bssid, bssid))
+               return;
 
-               from_ap = !ether_addr_equal(mgmt->sa, dev->dev_addr);
-               __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap);
-       } else if (wdev->sme_state == CFG80211_SME_CONNECTING) {
-               __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, NULL, 0,
-                                         WLAN_STATUS_UNSPECIFIED_FAILURE,
-                                         false, NULL);
-       }
+       __cfg80211_disconnected(wdev->netdev, NULL, 0, reason_code, from_ap);
+       cfg80211_sme_deauth(wdev);
 }
-EXPORT_SYMBOL(__cfg80211_send_deauth);
 
-void cfg80211_send_deauth(struct net_device *dev, const u8 *buf, size_t len)
+static void cfg80211_process_disassoc(struct wireless_dev *wdev,
+                                     const u8 *buf, size_t len)
 {
-       struct wireless_dev *wdev = dev->ieee80211_ptr;
+       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
+       const u8 *bssid = mgmt->bssid;
+       u16 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
+       bool from_ap = !ether_addr_equal(mgmt->sa, wdev->netdev->dev_addr);
 
-       wdev_lock(wdev);
-       __cfg80211_send_deauth(dev, buf, len);
-       wdev_unlock(wdev);
+       nl80211_send_disassoc(rdev, wdev->netdev, buf, len, GFP_KERNEL);
+
+       if (WARN_ON(!wdev->current_bss ||
+                   !ether_addr_equal(wdev->current_bss->pub.bssid, bssid)))
+               return;
+
+       __cfg80211_disconnected(wdev->netdev, NULL, 0, reason_code, from_ap);
+       cfg80211_sme_disassoc(wdev);
 }
-EXPORT_SYMBOL(cfg80211_send_deauth);
 
-void __cfg80211_send_disassoc(struct net_device *dev,
-                                    const u8 *buf, size_t len)
+void cfg80211_rx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
-       struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
-       const u8 *bssid = mgmt->bssid;
-       u16 reason_code;
-       bool from_ap;
+       struct ieee80211_mgmt *mgmt = (void *)buf;
 
-       trace___cfg80211_send_disassoc(dev);
        ASSERT_WDEV_LOCK(wdev);
 
-       nl80211_send_disassoc(rdev, dev, buf, len, GFP_KERNEL);
+       trace_cfg80211_rx_mlme_mgmt(dev, buf, len);
 
-       if (wdev->sme_state != CFG80211_SME_CONNECTED)
+       if (WARN_ON(len < 2))
                return;
 
-       if (wdev->current_bss &&
-           ether_addr_equal(wdev->current_bss->pub.bssid, bssid)) {
-               cfg80211_sme_disassoc(dev, wdev->current_bss);
-               cfg80211_unhold_bss(wdev->current_bss);
-               cfg80211_put_bss(wiphy, &wdev->current_bss->pub);
-               wdev->current_bss = NULL;
-       } else
-               WARN_ON(1);
-
-
-       reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
-
-       from_ap = !ether_addr_equal(mgmt->sa, dev->dev_addr);
-       __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap);
+       if (ieee80211_is_auth(mgmt->frame_control))
+               cfg80211_process_auth(wdev, buf, len);
+       else if (ieee80211_is_deauth(mgmt->frame_control))
+               cfg80211_process_deauth(wdev, buf, len);
+       else if (ieee80211_is_disassoc(mgmt->frame_control))
+               cfg80211_process_disassoc(wdev, buf, len);
 }
-EXPORT_SYMBOL(__cfg80211_send_disassoc);
+EXPORT_SYMBOL(cfg80211_rx_mlme_mgmt);
 
-void cfg80211_send_disassoc(struct net_device *dev, const u8 *buf, size_t len)
+void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
+       struct wiphy *wiphy = wdev->wiphy;
+       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+
+       trace_cfg80211_send_auth_timeout(dev, addr);
 
-       wdev_lock(wdev);
-       __cfg80211_send_disassoc(dev, buf, len);
-       wdev_unlock(wdev);
+       nl80211_send_auth_timeout(rdev, dev, addr, GFP_KERNEL);
+       cfg80211_sme_auth_timeout(wdev);
 }
-EXPORT_SYMBOL(cfg80211_send_disassoc);
+EXPORT_SYMBOL(cfg80211_auth_timeout);
 
-void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr)
+void cfg80211_assoc_timeout(struct net_device *dev, struct cfg80211_bss *bss)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct wiphy *wiphy = wdev->wiphy;
        struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 
-       trace_cfg80211_send_auth_timeout(dev, addr);
-       wdev_lock(wdev);
+       trace_cfg80211_send_assoc_timeout(dev, bss->bssid);
 
-       nl80211_send_auth_timeout(rdev, dev, addr, GFP_KERNEL);
-       if (wdev->sme_state == CFG80211_SME_CONNECTING)
-               __cfg80211_connect_result(dev, addr, NULL, 0, NULL, 0,
-                                         WLAN_STATUS_UNSPECIFIED_FAILURE,
-                                         false, NULL);
+       nl80211_send_assoc_timeout(rdev, dev, bss->bssid, GFP_KERNEL);
+       cfg80211_sme_assoc_timeout(wdev);
 
-       wdev_unlock(wdev);
+       cfg80211_unhold_bss(bss_from_pub(bss));
+       cfg80211_put_bss(wiphy, bss);
 }
-EXPORT_SYMBOL(cfg80211_send_auth_timeout);
+EXPORT_SYMBOL(cfg80211_assoc_timeout);
 
-void cfg80211_send_assoc_timeout(struct net_device *dev, const u8 *addr)
+void cfg80211_tx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct ieee80211_mgmt *mgmt = (void *)buf;
 
-       trace_cfg80211_send_assoc_timeout(dev, addr);
-       wdev_lock(wdev);
+       ASSERT_WDEV_LOCK(wdev);
 
-       nl80211_send_assoc_timeout(rdev, dev, addr, GFP_KERNEL);
-       if (wdev->sme_state == CFG80211_SME_CONNECTING)
-               __cfg80211_connect_result(dev, addr, NULL, 0, NULL, 0,
-                                         WLAN_STATUS_UNSPECIFIED_FAILURE,
-                                         false, NULL);
+       trace_cfg80211_tx_mlme_mgmt(dev, buf, len);
 
-       wdev_unlock(wdev);
+       if (WARN_ON(len < 2))
+               return;
+
+       if (ieee80211_is_deauth(mgmt->frame_control))
+               cfg80211_process_deauth(wdev, buf, len);
+       else
+               cfg80211_process_disassoc(wdev, buf, len);
 }
-EXPORT_SYMBOL(cfg80211_send_assoc_timeout);
+EXPORT_SYMBOL(cfg80211_tx_mlme_mgmt);
 
 void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr,
                                  enum nl80211_key_type key_type, int key_id,
@@ -253,18 +195,27 @@ void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr,
 EXPORT_SYMBOL(cfg80211_michael_mic_failure);
 
 /* some MLME handling for userspace SME */
-int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
-                        struct net_device *dev,
-                        struct ieee80211_channel *chan,
-                        enum nl80211_auth_type auth_type,
-                        const u8 *bssid,
-                        const u8 *ssid, int ssid_len,
-                        const u8 *ie, int ie_len,
-                        const u8 *key, int key_len, int key_idx,
-                        const u8 *sae_data, int sae_data_len)
+int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
+                      struct net_device *dev,
+                      struct ieee80211_channel *chan,
+                      enum nl80211_auth_type auth_type,
+                      const u8 *bssid,
+                      const u8 *ssid, int ssid_len,
+                      const u8 *ie, int ie_len,
+                      const u8 *key, int key_len, int key_idx,
+                      const u8 *sae_data, int sae_data_len)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_auth_request req;
+       struct cfg80211_auth_request req = {
+               .ie = ie,
+               .ie_len = ie_len,
+               .sae_data = sae_data,
+               .sae_data_len = sae_data_len,
+               .auth_type = auth_type,
+               .key = key,
+               .key_len = key_len,
+               .key_idx = key_idx,
+       };
        int err;
 
        ASSERT_WDEV_LOCK(wdev);
@@ -277,18 +228,8 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
            ether_addr_equal(bssid, wdev->current_bss->pub.bssid))
                return -EALREADY;
 
-       memset(&req, 0, sizeof(req));
-
-       req.ie = ie;
-       req.ie_len = ie_len;
-       req.sae_data = sae_data;
-       req.sae_data_len = sae_data_len;
-       req.auth_type = auth_type;
        req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len,
                                   WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
-       req.key = key;
-       req.key_len = key_len;
-       req.key_idx = key_idx;
        if (!req.bss)
                return -ENOENT;
 
@@ -304,28 +245,6 @@ out:
        return err;
 }
 
-int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
-                      struct net_device *dev, struct ieee80211_channel *chan,
-                      enum nl80211_auth_type auth_type, const u8 *bssid,
-                      const u8 *ssid, int ssid_len,
-                      const u8 *ie, int ie_len,
-                      const u8 *key, int key_len, int key_idx,
-                      const u8 *sae_data, int sae_data_len)
-{
-       int err;
-
-       mutex_lock(&rdev->devlist_mtx);
-       wdev_lock(dev->ieee80211_ptr);
-       err = __cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid,
-                                  ssid, ssid_len, ie, ie_len,
-                                  key, key_len, key_idx,
-                                  sae_data, sae_data_len);
-       wdev_unlock(dev->ieee80211_ptr);
-       mutex_unlock(&rdev->devlist_mtx);
-
-       return err;
-}
-
 /*  Do a logical ht_capa &= ht_capa_mask.  */
 void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa,
                               const struct ieee80211_ht_cap *ht_capa_mask)
@@ -360,30 +279,21 @@ void cfg80211_oper_and_vht_capa(struct ieee80211_vht_cap *vht_capa,
                p1[i] &= p2[i];
 }
 
-int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
-                         struct net_device *dev,
-                         struct ieee80211_channel *chan,
-                         const u8 *bssid,
-                         const u8 *ssid, int ssid_len,
-                         struct cfg80211_assoc_request *req)
+int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
+                       struct net_device *dev,
+                       struct ieee80211_channel *chan,
+                       const u8 *bssid,
+                       const u8 *ssid, int ssid_len,
+                       struct cfg80211_assoc_request *req)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        int err;
-       bool was_connected = false;
 
        ASSERT_WDEV_LOCK(wdev);
 
-       if (wdev->current_bss && req->prev_bssid &&
-           ether_addr_equal(wdev->current_bss->pub.bssid, req->prev_bssid)) {
-               /*
-                * Trying to reassociate: Allow this to proceed and let the old
-                * association to be dropped when the new one is completed.
-                */
-               if (wdev->sme_state == CFG80211_SME_CONNECTED) {
-                       was_connected = true;
-                       wdev->sme_state = CFG80211_SME_CONNECTING;
-               }
-       } else if (wdev->current_bss)
+       if (wdev->current_bss &&
+           (!req->prev_bssid || !ether_addr_equal(wdev->current_bss->pub.bssid,
+                                                  req->prev_bssid)))
                return -EALREADY;
 
        cfg80211_oper_and_ht_capa(&req->ht_capa_mask,
@@ -393,52 +303,28 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
 
        req->bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len,
                                    WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
-       if (!req->bss) {
-               if (was_connected)
-                       wdev->sme_state = CFG80211_SME_CONNECTED;
+       if (!req->bss)
                return -ENOENT;
-       }
 
        err = cfg80211_can_use_chan(rdev, wdev, chan, CHAN_MODE_SHARED);
        if (err)
                goto out;
 
        err = rdev_assoc(rdev, dev, req);
+       if (!err)
+               cfg80211_hold_bss(bss_from_pub(req->bss));
 
 out:
-       if (err) {
-               if (was_connected)
-                       wdev->sme_state = CFG80211_SME_CONNECTED;
+       if (err)
                cfg80211_put_bss(&rdev->wiphy, req->bss);
-       }
 
        return err;
 }
 
-int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
-                       struct net_device *dev,
-                       struct ieee80211_channel *chan,
-                       const u8 *bssid,
-                       const u8 *ssid, int ssid_len,
-                       struct cfg80211_assoc_request *req)
-{
-       struct wireless_dev *wdev = dev->ieee80211_ptr;
-       int err;
-
-       mutex_lock(&rdev->devlist_mtx);
-       wdev_lock(wdev);
-       err = __cfg80211_mlme_assoc(rdev, dev, chan, bssid,
-                                   ssid, ssid_len, req);
-       wdev_unlock(wdev);
-       mutex_unlock(&rdev->devlist_mtx);
-
-       return err;
-}
-
-int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
-                          struct net_device *dev, const u8 *bssid,
-                          const u8 *ie, int ie_len, u16 reason,
-                          bool local_state_change)
+int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
+                        struct net_device *dev, const u8 *bssid,
+                        const u8 *ie, int ie_len, u16 reason,
+                        bool local_state_change)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct cfg80211_deauth_request req = {
@@ -451,79 +337,51 @@ int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
 
        ASSERT_WDEV_LOCK(wdev);
 
-       if (local_state_change && (!wdev->current_bss ||
-           !ether_addr_equal(wdev->current_bss->pub.bssid, bssid)))
+       if (local_state_change &&
+           (!wdev->current_bss ||
+            !ether_addr_equal(wdev->current_bss->pub.bssid, bssid)))
                return 0;
 
        return rdev_deauth(rdev, dev, &req);
 }
 
-int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
-                        struct net_device *dev, const u8 *bssid,
-                        const u8 *ie, int ie_len, u16 reason,
-                        bool local_state_change)
+int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
+                          struct net_device *dev, const u8 *bssid,
+                          const u8 *ie, int ie_len, u16 reason,
+                          bool local_state_change)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
+       struct cfg80211_disassoc_request req = {
+               .reason_code = reason,
+               .local_state_change = local_state_change,
+               .ie = ie,
+               .ie_len = ie_len,
+       };
        int err;
 
-       wdev_lock(wdev);
-       err = __cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason,
-                                    local_state_change);
-       wdev_unlock(wdev);
-
-       return err;
-}
-
-static int __cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
-                                   struct net_device *dev, const u8 *bssid,
-                                   const u8 *ie, int ie_len, u16 reason,
-                                   bool local_state_change)
-{
-       struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_disassoc_request req;
-
        ASSERT_WDEV_LOCK(wdev);
 
-       if (wdev->sme_state != CFG80211_SME_CONNECTED)
-               return -ENOTCONN;
-
-       if (WARN(!wdev->current_bss, "sme_state=%d\n", wdev->sme_state))
+       if (!wdev->current_bss)
                return -ENOTCONN;
 
-       memset(&req, 0, sizeof(req));
-       req.reason_code = reason;
-       req.local_state_change = local_state_change;
-       req.ie = ie;
-       req.ie_len = ie_len;
        if (ether_addr_equal(wdev->current_bss->pub.bssid, bssid))
                req.bss = &wdev->current_bss->pub;
        else
                return -ENOTCONN;
 
-       return rdev_disassoc(rdev, dev, &req);
-}
-
-int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
-                          struct net_device *dev, const u8 *bssid,
-                          const u8 *ie, int ie_len, u16 reason,
-                          bool local_state_change)
-{
-       struct wireless_dev *wdev = dev->ieee80211_ptr;
-       int err;
-
-       wdev_lock(wdev);
-       err = __cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason,
-                                      local_state_change);
-       wdev_unlock(wdev);
+       err = rdev_disassoc(rdev, dev, &req);
+       if (err)
+               return err;
 
-       return err;
+       /* driver should have reported the disassoc */
+       WARN_ON(wdev->current_bss);
+       return 0;
 }
 
 void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
                        struct net_device *dev)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_deauth_request req;
        u8 bssid[ETH_ALEN];
 
        ASSERT_WDEV_LOCK(wdev);
@@ -531,23 +389,12 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
        if (!rdev->ops->deauth)
                return;
 
-       memset(&req, 0, sizeof(req));
-       req.reason_code = WLAN_REASON_DEAUTH_LEAVING;
-       req.ie = NULL;
-       req.ie_len = 0;
-
        if (!wdev->current_bss)
                return;
 
        memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN);
-       req.bssid = bssid;
-       rdev_deauth(rdev, dev, &req);
-
-       if (wdev->current_bss) {
-               cfg80211_unhold_bss(wdev->current_bss);
-               cfg80211_put_bss(&rdev->wiphy, &wdev->current_bss->pub);
-               wdev->current_bss = NULL;
-       }
+       cfg80211_mlme_deauth(rdev, dev, bssid, NULL, 0,
+                            WLAN_REASON_DEAUTH_LEAVING, false);
 }
 
 struct cfg80211_mgmt_registration {
@@ -848,7 +695,7 @@ void cfg80211_dfs_channels_update_work(struct work_struct *work)
                            dfs_update_channels_wk);
        wiphy = &rdev->wiphy;
 
-       mutex_lock(&cfg80211_mutex);
+       rtnl_lock();
        for (bandid = 0; bandid < IEEE80211_NUM_BANDS; bandid++) {
                sband = wiphy->bands[bandid];
                if (!sband)
@@ -881,7 +728,7 @@ void cfg80211_dfs_channels_update_work(struct work_struct *work)
                        check_again = true;
                }
        }
-       mutex_unlock(&cfg80211_mutex);
+       rtnl_unlock();
 
        /* reschedule if there are other channels waiting to be cleared again */
        if (check_again)
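
The mlme.c rework above folds the old cfg80211_send_rx_auth/_deauth/_disassoc entry points into cfg80211_rx_mlme_mgmt(), which asserts the wdev mutex and dispatches purely on frame_control, while cfg80211_auth_timeout()/cfg80211_assoc_timeout() cover the timeout cases. A hedged sketch of a caller honouring that contract (hypothetical function name; the in-tree caller is expected to already hold wdev->mtx):

        /* hedged sketch: single entry point for received MLME frames */
        static void report_mlme_frame_sketch(struct wireless_dev *wdev,
                                             const u8 *frame, size_t len)
        {
                wdev_lock(wdev);
                cfg80211_rx_mlme_mgmt(wdev->netdev, frame, len);
                wdev_unlock(wdev);
        }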
index b14b7e3cb6e65a76517752872753b53264baa7b4..7dc3343427c198b89342e5014af4412a5f24d2b7 100644 (file)
@@ -37,10 +37,10 @@ static void nl80211_post_doit(struct genl_ops *ops, struct sk_buff *skb,
 
 /* the netlink family */
 static struct genl_family nl80211_fam = {
-       .id = GENL_ID_GENERATE, /* don't bother with a hardcoded ID */
-       .name = "nl80211",      /* have users key off the name instead */
-       .hdrsize = 0,           /* no private header */
-       .version = 1,           /* no particular meaning now */
+       .id = GENL_ID_GENERATE,         /* don't bother with a hardcoded ID */
+       .name = NL80211_GENL_NAME,      /* have users key off the name instead */
+       .hdrsize = 0,                   /* no private header */
+       .version = 1,                   /* no particular meaning now */
        .maxattr = NL80211_ATTR_MAX,
        .netnsok = true,
        .pre_doit = nl80211_pre_doit,
@@ -59,7 +59,7 @@ __cfg80211_wdev_from_attrs(struct net *netns, struct nlattr **attrs)
        int wiphy_idx = -1;
        int ifidx = -1;
 
-       assert_cfg80211_lock();
+       ASSERT_RTNL();
 
        if (!have_ifidx && !have_wdev_id)
                return ERR_PTR(-EINVAL);
@@ -80,7 +80,6 @@ __cfg80211_wdev_from_attrs(struct net *netns, struct nlattr **attrs)
                if (have_wdev_id && rdev->wiphy_idx != wiphy_idx)
                        continue;
 
-               mutex_lock(&rdev->devlist_mtx);
                list_for_each_entry(wdev, &rdev->wdev_list, list) {
                        if (have_ifidx && wdev->netdev &&
                            wdev->netdev->ifindex == ifidx) {
@@ -92,7 +91,6 @@ __cfg80211_wdev_from_attrs(struct net *netns, struct nlattr **attrs)
                                break;
                        }
                }
-               mutex_unlock(&rdev->devlist_mtx);
 
                if (result)
                        break;
@@ -109,7 +107,7 @@ __cfg80211_rdev_from_attrs(struct net *netns, struct nlattr **attrs)
        struct cfg80211_registered_device *rdev = NULL, *tmp;
        struct net_device *netdev;
 
-       assert_cfg80211_lock();
+       ASSERT_RTNL();
 
        if (!attrs[NL80211_ATTR_WIPHY] &&
            !attrs[NL80211_ATTR_IFINDEX] &&
@@ -128,14 +126,12 @@ __cfg80211_rdev_from_attrs(struct net *netns, struct nlattr **attrs)
                tmp = cfg80211_rdev_by_wiphy_idx(wdev_id >> 32);
                if (tmp) {
                        /* make sure wdev exists */
-                       mutex_lock(&tmp->devlist_mtx);
                        list_for_each_entry(wdev, &tmp->wdev_list, list) {
                                if (wdev->identifier != (u32)wdev_id)
                                        continue;
                                found = true;
                                break;
                        }
-                       mutex_unlock(&tmp->devlist_mtx);
 
                        if (!found)
                                tmp = NULL;
@@ -182,19 +178,6 @@ __cfg80211_rdev_from_attrs(struct net *netns, struct nlattr **attrs)
 /*
  * This function returns a pointer to the driver
  * that the genl_info item that is passed refers to.
- * If successful, it returns non-NULL and also locks
- * the driver's mutex!
- *
- * This means that you need to call cfg80211_unlock_rdev()
- * before being allowed to acquire &cfg80211_mutex!
- *
- * This is necessary because we need to lock the global
- * mutex to get an item off the list safely, and then
- * we lock the rdev mutex so it doesn't go away under us.
- *
- * We don't want to keep cfg80211_mutex locked
- * for all the time in order to allow requests on
- * other interfaces to go through at the same time.
  *
  * The result of this can be a PTR_ERR and hence must
  * be checked with IS_ERR() for errors.
@@ -202,20 +185,7 @@ __cfg80211_rdev_from_attrs(struct net *netns, struct nlattr **attrs)
 static struct cfg80211_registered_device *
 cfg80211_get_dev_from_info(struct net *netns, struct genl_info *info)
 {
-       struct cfg80211_registered_device *rdev;
-
-       mutex_lock(&cfg80211_mutex);
-       rdev = __cfg80211_rdev_from_attrs(netns, info->attrs);
-
-       /* if it is not an error we grab the lock on
-        * it to assure it won't be going away while
-        * we operate on it */
-       if (!IS_ERR(rdev))
-               mutex_lock(&rdev->mtx);
-
-       mutex_unlock(&cfg80211_mutex);
-
-       return rdev;
+       return __cfg80211_rdev_from_attrs(netns, info->attrs);
 }
 
 /* policy for the attributes */
@@ -378,6 +348,7 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
        [NL80211_ATTR_MDID] = { .type = NLA_U16 },
        [NL80211_ATTR_IE_RIC] = { .type = NLA_BINARY,
                                  .len = IEEE80211_MAX_DATA_LEN },
+       [NL80211_ATTR_PEER_AID] = { .type = NLA_U16 },
 };
 
 /* policy for the key attributes */
@@ -455,7 +426,6 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
        int err;
 
        rtnl_lock();
-       mutex_lock(&cfg80211_mutex);
 
        if (!cb->args[0]) {
                err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
@@ -484,14 +454,12 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
                *rdev = wiphy_to_dev(wiphy);
                *wdev = NULL;
 
-               mutex_lock(&(*rdev)->devlist_mtx);
                list_for_each_entry(tmp, &(*rdev)->wdev_list, list) {
                        if (tmp->identifier == cb->args[1]) {
                                *wdev = tmp;
                                break;
                        }
                }
-               mutex_unlock(&(*rdev)->devlist_mtx);
 
                if (!*wdev) {
                        err = -ENODEV;
@@ -499,19 +467,14 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
                }
        }
 
-       cfg80211_lock_rdev(*rdev);
-
-       mutex_unlock(&cfg80211_mutex);
        return 0;
  out_unlock:
-       mutex_unlock(&cfg80211_mutex);
        rtnl_unlock();
        return err;
 }
 
 static void nl80211_finish_wdev_dump(struct cfg80211_registered_device *rdev)
 {
-       cfg80211_unlock_rdev(rdev);
        rtnl_unlock();
 }
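
After the hunk above, nl80211_prepare_wdev_dump()/nl80211_finish_wdev_dump() reduce to taking and dropping the RTNL around the wdev lookup. A sketch of how a dump handler brackets itself with them, assuming the double-pointer signature implied by the surrounding hunks (handler name and body are illustrative):

        /* hedged sketch: dump handler bracketed by prepare/finish */
        static int dump_something_sketch(struct sk_buff *skb,
                                         struct netlink_callback *cb)
        {
                struct cfg80211_registered_device *rdev;
                struct wireless_dev *wdev;
                int err;

                err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
                if (err)
                        return err;

                /* emit entries for wdev into skb here */

                nl80211_finish_wdev_dump(rdev);
                return skb->len;
        }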
 
@@ -837,12 +800,9 @@ static int nl80211_key_allowed(struct wireless_dev *wdev)
        case NL80211_IFTYPE_MESH_POINT:
                break;
        case NL80211_IFTYPE_ADHOC:
-               if (!wdev->current_bss)
-                       return -ENOLINK;
-               break;
        case NL80211_IFTYPE_STATION:
        case NL80211_IFTYPE_P2P_CLIENT:
-               if (wdev->sme_state != CFG80211_SME_CONNECTED)
+               if (!wdev->current_bss)
                        return -ENOLINK;
                break;
        default:
@@ -945,7 +905,7 @@ nla_put_failure:
 static int nl80211_send_wowlan_tcp_caps(struct cfg80211_registered_device *rdev,
                                        struct sk_buff *msg)
 {
-       const struct wiphy_wowlan_tcp_support *tcp = rdev->wiphy.wowlan.tcp;
+       const struct wiphy_wowlan_tcp_support *tcp = rdev->wiphy.wowlan->tcp;
        struct nlattr *nl_tcp;
 
        if (!tcp)
@@ -988,37 +948,37 @@ static int nl80211_send_wowlan(struct sk_buff *msg,
 {
        struct nlattr *nl_wowlan;
 
-       if (!dev->wiphy.wowlan.flags && !dev->wiphy.wowlan.n_patterns)
+       if (!dev->wiphy.wowlan)
                return 0;
 
        nl_wowlan = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED);
        if (!nl_wowlan)
                return -ENOBUFS;
 
-       if (((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_ANY) &&
+       if (((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_ANY) &&
             nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) ||
-           ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_DISCONNECT) &&
+           ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_DISCONNECT) &&
             nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) ||
-           ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_MAGIC_PKT) &&
+           ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_MAGIC_PKT) &&
             nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) ||
-           ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY) &&
+           ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY) &&
             nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED)) ||
-           ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) &&
+           ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) &&
             nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) ||
-           ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ) &&
+           ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ) &&
             nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) ||
-           ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_4WAY_HANDSHAKE) &&
+           ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_4WAY_HANDSHAKE) &&
             nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) ||
-           ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_RFKILL_RELEASE) &&
+           ((dev->wiphy.wowlan->flags & WIPHY_WOWLAN_RFKILL_RELEASE) &&
             nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE)))
                return -ENOBUFS;
 
-       if (dev->wiphy.wowlan.n_patterns) {
+       if (dev->wiphy.wowlan->n_patterns) {
                struct nl80211_wowlan_pattern_support pat = {
-                       .max_patterns = dev->wiphy.wowlan.n_patterns,
-                       .min_pattern_len = dev->wiphy.wowlan.pattern_min_len,
-                       .max_pattern_len = dev->wiphy.wowlan.pattern_max_len,
-                       .max_pkt_offset = dev->wiphy.wowlan.max_pkt_offset,
+                       .max_patterns = dev->wiphy.wowlan->n_patterns,
+                       .min_pattern_len = dev->wiphy.wowlan->pattern_min_len,
+                       .max_pattern_len = dev->wiphy.wowlan->pattern_max_len,
+                       .max_pkt_offset = dev->wiphy.wowlan->max_pkt_offset,
                };
 
                if (nla_put(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN,
@@ -1151,10 +1111,16 @@ nl80211_send_mgmt_stypes(struct sk_buff *msg,
        return 0;
 }
 
+struct nl80211_dump_wiphy_state {
+       s64 filter_wiphy;
+       long start;
+       long split_start, band_start, chan_start;
+       bool split;
+};
+
 static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                              struct sk_buff *msg, u32 portid, u32 seq,
-                             int flags, bool split, long *split_start,
-                             long *band_start, long *chan_start)
+                             int flags, struct nl80211_dump_wiphy_state *state)
 {
        void *hdr;
        struct nlattr *nl_bands, *nl_band;
@@ -1165,19 +1131,14 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
        int i;
        const struct ieee80211_txrx_stypes *mgmt_stypes =
                                dev->wiphy.mgmt_stypes;
-       long start = 0, start_chan = 0, start_band = 0;
        u32 features;
 
        hdr = nl80211hdr_put(msg, portid, seq, flags, NL80211_CMD_NEW_WIPHY);
        if (!hdr)
                return -ENOBUFS;
 
-       /* allow always using the variables */
-       if (!split) {
-               split_start = &start;
-               band_start = &start_band;
-               chan_start = &start_chan;
-       }
+       if (WARN_ON(!state))
+               return -EINVAL;
 
        if (nla_put_u32(msg, NL80211_ATTR_WIPHY, dev->wiphy_idx) ||
            nla_put_string(msg, NL80211_ATTR_WIPHY_NAME,
@@ -1186,7 +1147,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                        cfg80211_rdev_list_generation))
                goto nla_put_failure;
 
-       switch (*split_start) {
+       switch (state->split_start) {
        case 0:
                if (nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT,
                               dev->wiphy.retry_short) ||
@@ -1228,9 +1189,12 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                if ((dev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP) &&
                    nla_put_flag(msg, NL80211_ATTR_TDLS_EXTERNAL_SETUP))
                        goto nla_put_failure;
+               if ((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_5_10_MHZ) &&
+                   nla_put_flag(msg, WIPHY_FLAG_SUPPORTS_5_10_MHZ))
+                       goto nla_put_failure;
 
-               (*split_start)++;
-               if (split)
+               state->split_start++;
+               if (state->split)
                        break;
        case 1:
                if (nla_put(msg, NL80211_ATTR_CIPHER_SUITES,
@@ -1274,22 +1238,23 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                        }
                }
 
-               (*split_start)++;
-               if (split)
+               state->split_start++;
+               if (state->split)
                        break;
        case 2:
                if (nl80211_put_iftypes(msg, NL80211_ATTR_SUPPORTED_IFTYPES,
                                        dev->wiphy.interface_modes))
                                goto nla_put_failure;
-               (*split_start)++;
-               if (split)
+               state->split_start++;
+               if (state->split)
                        break;
        case 3:
                nl_bands = nla_nest_start(msg, NL80211_ATTR_WIPHY_BANDS);
                if (!nl_bands)
                        goto nla_put_failure;
 
-               for (band = *band_start; band < IEEE80211_NUM_BANDS; band++) {
+               for (band = state->band_start;
+                    band < IEEE80211_NUM_BANDS; band++) {
                        struct ieee80211_supported_band *sband;
 
                        sband = dev->wiphy.bands[band];
@@ -1301,12 +1266,12 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                        if (!nl_band)
                                goto nla_put_failure;
 
-                       switch (*chan_start) {
+                       switch (state->chan_start) {
                        case 0:
                                if (nl80211_send_band_rateinfo(msg, sband))
                                        goto nla_put_failure;
-                               (*chan_start)++;
-                               if (split)
+                               state->chan_start++;
+                               if (state->split)
                                        break;
                        default:
                                /* add frequencies */
@@ -1315,7 +1280,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                                if (!nl_freqs)
                                        goto nla_put_failure;
 
-                               for (i = *chan_start - 1;
+                               for (i = state->chan_start - 1;
                                     i < sband->n_channels;
                                     i++) {
                                        nl_freq = nla_nest_start(msg, i);
@@ -1324,26 +1289,27 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
 
                                        chan = &sband->channels[i];
 
-                                       if (nl80211_msg_put_channel(msg, chan,
-                                                                   split))
+                                       if (nl80211_msg_put_channel(
+                                                       msg, chan,
+                                                       state->split))
                                                goto nla_put_failure;
 
                                        nla_nest_end(msg, nl_freq);
-                                       if (split)
+                                       if (state->split)
                                                break;
                                }
                                if (i < sband->n_channels)
-                                       *chan_start = i + 2;
+                                       state->chan_start = i + 2;
                                else
-                                       *chan_start = 0;
+                                       state->chan_start = 0;
                                nla_nest_end(msg, nl_freqs);
                        }
 
                        nla_nest_end(msg, nl_band);
 
-                       if (split) {
+                       if (state->split) {
                                /* start again here */
-                               if (*chan_start)
+                               if (state->chan_start)
                                        band--;
                                break;
                        }
@@ -1351,14 +1317,14 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                nla_nest_end(msg, nl_bands);
 
                if (band < IEEE80211_NUM_BANDS)
-                       *band_start = band + 1;
+                       state->band_start = band + 1;
                else
-                       *band_start = 0;
+                       state->band_start = 0;
 
                /* if bands & channels are done, continue outside */
-               if (*band_start == 0 && *chan_start == 0)
-                       (*split_start)++;
-               if (split)
+               if (state->band_start == 0 && state->chan_start == 0)
+                       state->split_start++;
+               if (state->split)
                        break;
        case 4:
                nl_cmds = nla_nest_start(msg, NL80211_ATTR_SUPPORTED_COMMANDS);
@@ -1424,7 +1390,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                }
                CMD(start_p2p_device, START_P2P_DEVICE);
                CMD(set_mcast_rate, SET_MCAST_RATE);
-               if (split) {
+               if (state->split) {
                        CMD(crit_proto_start, CRIT_PROTOCOL_START);
                        CMD(crit_proto_stop, CRIT_PROTOCOL_STOP);
                }
@@ -1448,8 +1414,8 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                }
 
                nla_nest_end(msg, nl_cmds);
-               (*split_start)++;
-               if (split)
+               state->split_start++;
+               if (state->split)
                        break;
        case 5:
                if (dev->ops->remain_on_channel &&
@@ -1465,29 +1431,30 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
 
                if (nl80211_send_mgmt_stypes(msg, mgmt_stypes))
                        goto nla_put_failure;
-               (*split_start)++;
-               if (split)
+               state->split_start++;
+               if (state->split)
                        break;
        case 6:
 #ifdef CONFIG_PM
-               if (nl80211_send_wowlan(msg, dev, split))
+               if (nl80211_send_wowlan(msg, dev, state->split))
                        goto nla_put_failure;
-               (*split_start)++;
-               if (split)
+               state->split_start++;
+               if (state->split)
                        break;
 #else
-               (*split_start)++;
+               state->split_start++;
 #endif
        case 7:
                if (nl80211_put_iftypes(msg, NL80211_ATTR_SOFTWARE_IFTYPES,
                                        dev->wiphy.software_iftypes))
                        goto nla_put_failure;
 
-               if (nl80211_put_iface_combinations(&dev->wiphy, msg, split))
+               if (nl80211_put_iface_combinations(&dev->wiphy, msg,
+                                                  state->split))
                        goto nla_put_failure;
 
-               (*split_start)++;
-               if (split)
+               state->split_start++;
+               if (state->split)
                        break;
        case 8:
                if ((dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME) &&
@@ -1501,7 +1468,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                 * dump is split, otherwise it makes it too big. Therefore
                 * only advertise it in that case.
                 */
-               if (split)
+               if (state->split)
                        features |= NL80211_FEATURE_ADVERTISE_CHAN_LIMITS;
                if (nla_put_u32(msg, NL80211_ATTR_FEATURE_FLAGS, features))
                        goto nla_put_failure;
@@ -1528,7 +1495,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                 * case we'll continue with more data in the next round,
                 * but break unconditionally so unsplit data stops here.
                 */
-               (*split_start)++;
+               state->split_start++;
                break;
        case 9:
                if (dev->wiphy.extended_capabilities &&
@@ -1547,7 +1514,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                        goto nla_put_failure;
 
                /* done */
-               *split_start = 0;
+               state->split_start = 0;
                break;
        }
        return genlmsg_end(msg, hdr);
@@ -1557,66 +1524,76 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
        return -EMSGSIZE;
 }
 
+static int nl80211_dump_wiphy_parse(struct sk_buff *skb,
+                                   struct netlink_callback *cb,
+                                   struct nl80211_dump_wiphy_state *state)
+{
+       struct nlattr **tb = nl80211_fam.attrbuf;
+       int ret = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
+                             tb, nl80211_fam.maxattr, nl80211_policy);
+       /* ignore parse errors for backward compatibility */
+       if (ret)
+               return 0;
+
+       state->split = tb[NL80211_ATTR_SPLIT_WIPHY_DUMP];
+       if (tb[NL80211_ATTR_WIPHY])
+               state->filter_wiphy = nla_get_u32(tb[NL80211_ATTR_WIPHY]);
+       if (tb[NL80211_ATTR_WDEV])
+               state->filter_wiphy = nla_get_u64(tb[NL80211_ATTR_WDEV]) >> 32;
+       if (tb[NL80211_ATTR_IFINDEX]) {
+               struct net_device *netdev;
+               struct cfg80211_registered_device *rdev;
+               int ifidx = nla_get_u32(tb[NL80211_ATTR_IFINDEX]);
+
+               netdev = dev_get_by_index(sock_net(skb->sk), ifidx);
+               if (!netdev)
+                       return -ENODEV;
+               if (netdev->ieee80211_ptr) {
+                       rdev = wiphy_to_dev(
+                               netdev->ieee80211_ptr->wiphy);
+                       state->filter_wiphy = rdev->wiphy_idx;
+               }
+               dev_put(netdev);
+       }
+
+       return 0;
+}
+
 static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
 {
        int idx = 0, ret;
-       int start = cb->args[0];
+       struct nl80211_dump_wiphy_state *state = (void *)cb->args[0];
        struct cfg80211_registered_device *dev;
-       s64 filter_wiphy = -1;
-       bool split = false;
-       struct nlattr **tb;
-       int res;
 
-       /* will be zeroed in nlmsg_parse() */
-       tb = kmalloc(sizeof(*tb) * (NL80211_ATTR_MAX + 1), GFP_KERNEL);
-       if (!tb)
-               return -ENOMEM;
-
-       mutex_lock(&cfg80211_mutex);
-       res = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
-                         tb, NL80211_ATTR_MAX, nl80211_policy);
-       if (res == 0) {
-               split = tb[NL80211_ATTR_SPLIT_WIPHY_DUMP];
-               if (tb[NL80211_ATTR_WIPHY])
-                       filter_wiphy = nla_get_u32(tb[NL80211_ATTR_WIPHY]);
-               if (tb[NL80211_ATTR_WDEV])
-                       filter_wiphy = nla_get_u64(tb[NL80211_ATTR_WDEV]) >> 32;
-               if (tb[NL80211_ATTR_IFINDEX]) {
-                       struct net_device *netdev;
-                       int ifidx = nla_get_u32(tb[NL80211_ATTR_IFINDEX]);
-
-                       netdev = dev_get_by_index(sock_net(skb->sk), ifidx);
-                       if (!netdev) {
-                               mutex_unlock(&cfg80211_mutex);
-                               kfree(tb);
-                               return -ENODEV;
-                       }
-                       if (netdev->ieee80211_ptr) {
-                               dev = wiphy_to_dev(
-                                       netdev->ieee80211_ptr->wiphy);
-                               filter_wiphy = dev->wiphy_idx;
-                       }
-                       dev_put(netdev);
+       rtnl_lock();
+       if (!state) {
+               state = kzalloc(sizeof(*state), GFP_KERNEL);
+               if (!state)
+                       return -ENOMEM;
+               state->filter_wiphy = -1;
+               ret = nl80211_dump_wiphy_parse(skb, cb, state);
+               if (ret) {
+                       kfree(state);
+                       rtnl_unlock();
+                       return ret;
                }
+               cb->args[0] = (long)state;
        }
-       kfree(tb);
 
        list_for_each_entry(dev, &cfg80211_rdev_list, list) {
                if (!net_eq(wiphy_net(&dev->wiphy), sock_net(skb->sk)))
                        continue;
-               if (++idx <= start)
+               if (++idx <= state->start)
                        continue;
-               if (filter_wiphy != -1 && dev->wiphy_idx != filter_wiphy)
+               if (state->filter_wiphy != -1 &&
+                   state->filter_wiphy != dev->wiphy_idx)
                        continue;
                /* attempt to fit multiple wiphy data chunks into the skb */
                do {
                        ret = nl80211_send_wiphy(dev, skb,
                                                 NETLINK_CB(cb->skb).portid,
                                                 cb->nlh->nlmsg_seq,
-                                                NLM_F_MULTI,
-                                                split, &cb->args[1],
-                                                &cb->args[2],
-                                                &cb->args[3]);
+                                                NLM_F_MULTI, state);
                        if (ret < 0) {
                                /*
                                 * If sending the wiphy data didn't fit (ENOBUFS
@@ -1635,33 +1612,40 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
                                    !skb->len &&
                                    cb->min_dump_alloc < 4096) {
                                        cb->min_dump_alloc = 4096;
-                                       mutex_unlock(&cfg80211_mutex);
+                                       rtnl_unlock();
                                        return 1;
                                }
                                idx--;
                                break;
                        }
-               } while (cb->args[1] > 0);
+               } while (state->split_start > 0);
                break;
        }
-       mutex_unlock(&cfg80211_mutex);
+       rtnl_unlock();
 
-       cb->args[0] = idx;
+       state->start = idx;
 
        return skb->len;
 }
 
+static int nl80211_dump_wiphy_done(struct netlink_callback *cb)
+{
+       kfree((void *)cb->args[0]);
+       return 0;
+}
+
 static int nl80211_get_wiphy(struct sk_buff *skb, struct genl_info *info)
 {
        struct sk_buff *msg;
        struct cfg80211_registered_device *dev = info->user_ptr[0];
+       struct nl80211_dump_wiphy_state state = {};
 
        msg = nlmsg_new(4096, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;
 
        if (nl80211_send_wiphy(dev, msg, info->snd_portid, info->snd_seq, 0,
-                              false, NULL, NULL, NULL) < 0) {
+                              &state) < 0) {
                nlmsg_free(msg);
                return -ENOBUFS;
        }
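
The hunks above convert the wiphy dump so that its position is carried in a heap-allocated struct nl80211_dump_wiphy_state hung off cb->args[0]: the state is allocated on the first dump pass, reused on every continuation, and released in the new .done handler. As a rough illustration of that resumable-dump pattern, here is a standalone C sketch; all names in it (fake_cb, dump_state, dump_items, dump_done) are hypothetical stand-ins, not code from this commit.

/* Illustrative analogue of the cb->args[0] state pattern above; hypothetical
 * names only, compiled and run as an ordinary userspace program. */
#include <stdio.h>
#include <stdlib.h>

struct fake_cb {            /* stands in for struct netlink_callback */
	long args[4];       /* opaque per-dump scratch space */
};

struct dump_state {         /* stands in for nl80211_dump_wiphy_state */
	int next;           /* where to resume on the next invocation */
};

/* Emit at most 'budget' items per call, resuming where the last call stopped.
 * Returns the number of items emitted, 0 when the dump is complete. */
static int dump_items(struct fake_cb *cb, int total, int budget)
{
	struct dump_state *state = (void *)cb->args[0];
	int emitted = 0;

	if (!state) {                       /* first invocation: allocate state */
		state = calloc(1, sizeof(*state));
		if (!state)
			return -1;
		cb->args[0] = (long)state;
	}

	while (state->next < total && emitted < budget) {
		printf("item %d\n", state->next);
		state->next++;
		emitted++;
	}
	return emitted;
}

static void dump_done(struct fake_cb *cb)   /* mirrors the role of a .done hook */
{
	free((void *)cb->args[0]);
	cb->args[0] = 0;
}

int main(void)
{
	struct fake_cb cb = { { 0 } };

	while (dump_items(&cb, 7, 3) > 0)   /* the kernel re-calls until nothing is left */
		;
	dump_done(&cb);
	return 0;
}
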
@@ -1778,6 +1762,11 @@ static int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
                                     IEEE80211_CHAN_DISABLED))
                return -EINVAL;
 
+       if ((chandef->width == NL80211_CHAN_WIDTH_5 ||
+            chandef->width == NL80211_CHAN_WIDTH_10) &&
+           !(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_5_10_MHZ))
+               return -EINVAL;
+
        return 0;
 }
 
@@ -1799,7 +1788,6 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
        if (result)
                return result;
 
-       mutex_lock(&rdev->devlist_mtx);
        switch (iftype) {
        case NL80211_IFTYPE_AP:
        case NL80211_IFTYPE_P2P_GO:
@@ -1823,7 +1811,6 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
        default:
                result = -EINVAL;
        }
-       mutex_unlock(&rdev->devlist_mtx);
 
        return result;
 }
@@ -1872,6 +1859,8 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
        u32 frag_threshold = 0, rts_threshold = 0;
        u8 coverage_class = 0;
 
+       ASSERT_RTNL();
+
        /*
         * Try to find the wiphy and netdev. Normally this
         * function shouldn't need the netdev, but this is
@@ -1881,31 +1870,25 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
         * also passed a netdev to set_wiphy, so that it is
         * possible to let that go to the right netdev!
         */
-       mutex_lock(&cfg80211_mutex);
 
        if (info->attrs[NL80211_ATTR_IFINDEX]) {
                int ifindex = nla_get_u32(info->attrs[NL80211_ATTR_IFINDEX]);
 
                netdev = dev_get_by_index(genl_info_net(info), ifindex);
-               if (netdev && netdev->ieee80211_ptr) {
+               if (netdev && netdev->ieee80211_ptr)
                        rdev = wiphy_to_dev(netdev->ieee80211_ptr->wiphy);
-                       mutex_lock(&rdev->mtx);
-               } else
+               else
                        netdev = NULL;
        }
 
        if (!netdev) {
                rdev = __cfg80211_rdev_from_attrs(genl_info_net(info),
                                                  info->attrs);
-               if (IS_ERR(rdev)) {
-                       mutex_unlock(&cfg80211_mutex);
+               if (IS_ERR(rdev))
                        return PTR_ERR(rdev);
-               }
                wdev = NULL;
                netdev = NULL;
                result = 0;
-
-               mutex_lock(&rdev->mtx);
        } else
                wdev = netdev->ieee80211_ptr;
 
@@ -1918,8 +1901,6 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
                result = cfg80211_dev_rename(
                        rdev, nla_data(info->attrs[NL80211_ATTR_WIPHY_NAME]));
 
-       mutex_unlock(&cfg80211_mutex);
-
        if (result)
                goto bad_res;
 
@@ -2126,7 +2107,6 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
        }
 
  bad_res:
-       mutex_unlock(&rdev->mtx);
        if (netdev)
                dev_put(netdev);
        return result;
@@ -2224,7 +2204,7 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
        struct cfg80211_registered_device *rdev;
        struct wireless_dev *wdev;
 
-       mutex_lock(&cfg80211_mutex);
+       rtnl_lock();
        list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
                if (!net_eq(wiphy_net(&rdev->wiphy), sock_net(skb->sk)))
                        continue;
@@ -2234,7 +2214,6 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
                }
                if_idx = 0;
 
-               mutex_lock(&rdev->devlist_mtx);
                list_for_each_entry(wdev, &rdev->wdev_list, list) {
                        if (if_idx < if_start) {
                                if_idx++;
@@ -2243,17 +2222,15 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
                        if (nl80211_send_iface(skb, NETLINK_CB(cb->skb).portid,
                                               cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                               rdev, wdev) < 0) {
-                               mutex_unlock(&rdev->devlist_mtx);
                                goto out;
                        }
                        if_idx++;
                }
-               mutex_unlock(&rdev->devlist_mtx);
 
                wp_idx++;
        }
  out:
-       mutex_unlock(&cfg80211_mutex);
+       rtnl_unlock();
 
        cb->args[0] = wp_idx;
        cb->args[1] = if_idx;
@@ -2286,6 +2263,7 @@ static const struct nla_policy mntr_flags_policy[NL80211_MNTR_FLAG_MAX + 1] = {
        [NL80211_MNTR_FLAG_CONTROL] = { .type = NLA_FLAG },
        [NL80211_MNTR_FLAG_OTHER_BSS] = { .type = NLA_FLAG },
        [NL80211_MNTR_FLAG_COOK_FRAMES] = { .type = NLA_FLAG },
+       [NL80211_MNTR_FLAG_ACTIVE] = { .type = NLA_FLAG },
 };
 
 static int parse_monitor_flags(struct nlattr *nla, u32 *mntrflags)
@@ -2397,6 +2375,10 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
                change = true;
        }
 
+       if (flags && (*flags & NL80211_MNTR_FLAG_ACTIVE) &&
+           !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR))
+               return -EOPNOTSUPP;
+
        if (change)
                err = cfg80211_change_iface(rdev, dev, ntype, flags, &params);
        else
@@ -2454,6 +2436,11 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
        err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ?
                                  info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL,
                                  &flags);
+
+       if (!err && (flags & NL80211_MNTR_FLAG_ACTIVE) &&
+           !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR))
+               return -EOPNOTSUPP;
+
        wdev = rdev_add_virtual_intf(rdev,
                                nla_data(info->attrs[NL80211_ATTR_IFNAME]),
                                type, err ? NULL : &flags, &params);
@@ -2486,11 +2473,9 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
                INIT_LIST_HEAD(&wdev->mgmt_registrations);
                spin_lock_init(&wdev->mgmt_registrations_lock);
 
-               mutex_lock(&rdev->devlist_mtx);
                wdev->identifier = ++rdev->wdev_id;
                list_add_rcu(&wdev->list, &rdev->wdev_list);
                rdev->devlist_generation++;
-               mutex_unlock(&rdev->devlist_mtx);
                break;
        default:
                break;
@@ -2933,61 +2918,58 @@ static int nl80211_set_mac_acl(struct sk_buff *skb, struct genl_info *info)
        return err;
 }
 
-static int nl80211_parse_beacon(struct genl_info *info,
+static int nl80211_parse_beacon(struct nlattr *attrs[],
                                struct cfg80211_beacon_data *bcn)
 {
        bool haveinfo = false;
 
-       if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_BEACON_TAIL]) ||
-           !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]) ||
-           !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE_PROBE_RESP]) ||
-           !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE_ASSOC_RESP]))
+       if (!is_valid_ie_attr(attrs[NL80211_ATTR_BEACON_TAIL]) ||
+           !is_valid_ie_attr(attrs[NL80211_ATTR_IE]) ||
+           !is_valid_ie_attr(attrs[NL80211_ATTR_IE_PROBE_RESP]) ||
+           !is_valid_ie_attr(attrs[NL80211_ATTR_IE_ASSOC_RESP]))
                return -EINVAL;
 
        memset(bcn, 0, sizeof(*bcn));
 
-       if (info->attrs[NL80211_ATTR_BEACON_HEAD]) {
-               bcn->head = nla_data(info->attrs[NL80211_ATTR_BEACON_HEAD]);
-               bcn->head_len = nla_len(info->attrs[NL80211_ATTR_BEACON_HEAD]);
+       if (attrs[NL80211_ATTR_BEACON_HEAD]) {
+               bcn->head = nla_data(attrs[NL80211_ATTR_BEACON_HEAD]);
+               bcn->head_len = nla_len(attrs[NL80211_ATTR_BEACON_HEAD]);
                if (!bcn->head_len)
                        return -EINVAL;
                haveinfo = true;
        }
 
-       if (info->attrs[NL80211_ATTR_BEACON_TAIL]) {
-               bcn->tail = nla_data(info->attrs[NL80211_ATTR_BEACON_TAIL]);
-               bcn->tail_len =
-                   nla_len(info->attrs[NL80211_ATTR_BEACON_TAIL]);
+       if (attrs[NL80211_ATTR_BEACON_TAIL]) {
+               bcn->tail = nla_data(attrs[NL80211_ATTR_BEACON_TAIL]);
+               bcn->tail_len = nla_len(attrs[NL80211_ATTR_BEACON_TAIL]);
                haveinfo = true;
        }
 
        if (!haveinfo)
                return -EINVAL;
 
-       if (info->attrs[NL80211_ATTR_IE]) {
-               bcn->beacon_ies = nla_data(info->attrs[NL80211_ATTR_IE]);
-               bcn->beacon_ies_len = nla_len(info->attrs[NL80211_ATTR_IE]);
+       if (attrs[NL80211_ATTR_IE]) {
+               bcn->beacon_ies = nla_data(attrs[NL80211_ATTR_IE]);
+               bcn->beacon_ies_len = nla_len(attrs[NL80211_ATTR_IE]);
        }
 
-       if (info->attrs[NL80211_ATTR_IE_PROBE_RESP]) {
+       if (attrs[NL80211_ATTR_IE_PROBE_RESP]) {
                bcn->proberesp_ies =
-                       nla_data(info->attrs[NL80211_ATTR_IE_PROBE_RESP]);
+                       nla_data(attrs[NL80211_ATTR_IE_PROBE_RESP]);
                bcn->proberesp_ies_len =
-                       nla_len(info->attrs[NL80211_ATTR_IE_PROBE_RESP]);
+                       nla_len(attrs[NL80211_ATTR_IE_PROBE_RESP]);
        }
 
-       if (info->attrs[NL80211_ATTR_IE_ASSOC_RESP]) {
+       if (attrs[NL80211_ATTR_IE_ASSOC_RESP]) {
                bcn->assocresp_ies =
-                       nla_data(info->attrs[NL80211_ATTR_IE_ASSOC_RESP]);
+                       nla_data(attrs[NL80211_ATTR_IE_ASSOC_RESP]);
                bcn->assocresp_ies_len =
-                       nla_len(info->attrs[NL80211_ATTR_IE_ASSOC_RESP]);
+                       nla_len(attrs[NL80211_ATTR_IE_ASSOC_RESP]);
        }
 
-       if (info->attrs[NL80211_ATTR_PROBE_RESP]) {
-               bcn->probe_resp =
-                       nla_data(info->attrs[NL80211_ATTR_PROBE_RESP]);
-               bcn->probe_resp_len =
-                       nla_len(info->attrs[NL80211_ATTR_PROBE_RESP]);
+       if (attrs[NL80211_ATTR_PROBE_RESP]) {
+               bcn->probe_resp = nla_data(attrs[NL80211_ATTR_PROBE_RESP]);
+               bcn->probe_resp_len = nla_len(attrs[NL80211_ATTR_PROBE_RESP]);
        }
 
        return 0;
@@ -2999,8 +2981,6 @@ static bool nl80211_get_ap_channel(struct cfg80211_registered_device *rdev,
        struct wireless_dev *wdev;
        bool ret = false;
 
-       mutex_lock(&rdev->devlist_mtx);
-
        list_for_each_entry(wdev, &rdev->wdev_list, list) {
                if (wdev->iftype != NL80211_IFTYPE_AP &&
                    wdev->iftype != NL80211_IFTYPE_P2P_GO)
@@ -3014,8 +2994,6 @@ static bool nl80211_get_ap_channel(struct cfg80211_registered_device *rdev,
                break;
        }
 
-       mutex_unlock(&rdev->devlist_mtx);
-
        return ret;
 }
 
@@ -3070,7 +3048,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
            !info->attrs[NL80211_ATTR_BEACON_HEAD])
                return -EINVAL;
 
-       err = nl80211_parse_beacon(info, &params.beacon);
+       err = nl80211_parse_beacon(info->attrs, &params.beacon);
        if (err)
                return err;
 
@@ -3177,13 +3155,10 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
                params.radar_required = true;
        }
 
-       mutex_lock(&rdev->devlist_mtx);
        err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
                                           params.chandef.chan,
                                           CHAN_MODE_SHARED,
                                           radar_detect_width);
-       mutex_unlock(&rdev->devlist_mtx);
-
        if (err)
                return err;
 
@@ -3225,7 +3200,7 @@ static int nl80211_set_beacon(struct sk_buff *skb, struct genl_info *info)
        if (!wdev->beacon_interval)
                return -EINVAL;
 
-       err = nl80211_parse_beacon(info, &params);
+       err = nl80211_parse_beacon(info->attrs, &params);
        if (err)
                return err;
 
@@ -3383,6 +3358,32 @@ static bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info,
        return true;
 }
 
+static bool nl80211_put_signal(struct sk_buff *msg, u8 mask, s8 *signal,
+                              int id)
+{
+       void *attr;
+       int i = 0;
+
+       if (!mask)
+               return true;
+
+       attr = nla_nest_start(msg, id);
+       if (!attr)
+               return false;
+
+       for (i = 0; i < IEEE80211_MAX_CHAINS; i++) {
+               if (!(mask & BIT(i)))
+                       continue;
+
+               if (nla_put_u8(msg, i, signal[i]))
+                       return false;
+       }
+
+       nla_nest_end(msg, attr);
+
+       return true;
+}
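
The nl80211_put_signal() helper added above walks a bitmask of active chains and nests one signal value per set bit. A minimal userspace sketch of the same walk-the-mask technique follows; the names and values are hypothetical and only illustrate the loop structure.

/* Illustrative only: print one value per chain whose bit is set in 'mask'. */
#include <stdio.h>

#define MAX_CHAINS 4   /* stands in for IEEE80211_MAX_CHAINS */

static void put_signal(unsigned char mask, const signed char *signal)
{
	for (int i = 0; i < MAX_CHAINS; i++) {
		if (!(mask & (1u << i)))
			continue;           /* chain not reported, skip it */
		printf("chain %d: %d dBm\n", i, signal[i]);
	}
}

int main(void)
{
	signed char sig[MAX_CHAINS] = { -40, -42, 0, -55 };

	put_signal(0x0b, sig);              /* chains 0, 1 and 3 active */
	return 0;
}
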
+
 static int nl80211_send_station(struct sk_buff *msg, u32 portid, u32 seq,
                                int flags,
                                struct cfg80211_registered_device *rdev,
@@ -3454,6 +3455,18 @@ static int nl80211_send_station(struct sk_buff *msg, u32 portid, u32 seq,
        default:
                break;
        }
+       if (sinfo->filled & STATION_INFO_CHAIN_SIGNAL) {
+               if (!nl80211_put_signal(msg, sinfo->chains,
+                                       sinfo->chain_signal,
+                                       NL80211_STA_INFO_CHAIN_SIGNAL))
+                       goto nla_put_failure;
+       }
+       if (sinfo->filled & STATION_INFO_CHAIN_SIGNAL_AVG) {
+               if (!nl80211_put_signal(msg, sinfo->chains,
+                                       sinfo->chain_signal_avg,
+                                       NL80211_STA_INFO_CHAIN_SIGNAL_AVG))
+                       goto nla_put_failure;
+       }
        if (sinfo->filled & STATION_INFO_TX_BITRATE) {
                if (!nl80211_put_sta_rate(msg, &sinfo->txrate,
                                          NL80211_STA_INFO_TX_BITRATE))
@@ -3841,6 +3854,8 @@ static int nl80211_set_station_tdls(struct genl_info *info,
                                    struct station_parameters *params)
 {
        /* Dummy STA entry gets updated once the peer capabilities are known */
+       if (info->attrs[NL80211_ATTR_PEER_AID])
+               params->aid = nla_get_u16(info->attrs[NL80211_ATTR_PEER_AID]);
        if (info->attrs[NL80211_ATTR_HT_CAPABILITY])
                params->ht_capa =
                        nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);
@@ -3981,7 +3996,8 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
        if (!info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES])
                return -EINVAL;
 
-       if (!info->attrs[NL80211_ATTR_STA_AID])
+       if (!info->attrs[NL80211_ATTR_STA_AID] &&
+           !info->attrs[NL80211_ATTR_PEER_AID])
                return -EINVAL;
 
        mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
@@ -3992,7 +4008,10 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
        params.listen_interval =
                nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]);
 
-       params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]);
+       if (info->attrs[NL80211_ATTR_PEER_AID])
+               params.aid = nla_get_u16(info->attrs[NL80211_ATTR_PEER_AID]);
+       else
+               params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]);
        if (!params.aid || params.aid > IEEE80211_MAX_AID)
                return -EINVAL;
 
@@ -4044,7 +4063,8 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
                        params.sta_modify_mask &= ~STATION_PARAM_APPLY_UAPSD;
 
                /* TDLS peers cannot be added */
-               if (params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
+               if ((params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) ||
+                   info->attrs[NL80211_ATTR_PEER_AID])
                        return -EINVAL;
                /* but don't bother the driver with it */
                params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER);
@@ -4070,7 +4090,8 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
                if (params.sta_flags_mask & BIT(NL80211_STA_FLAG_ASSOCIATED))
                        return -EINVAL;
                /* TDLS peers cannot be added */
-               if (params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
+               if ((params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) ||
+                   info->attrs[NL80211_ATTR_PEER_AID])
                        return -EINVAL;
                break;
        case NL80211_IFTYPE_STATION:
@@ -4592,7 +4613,9 @@ static int nl80211_get_mesh_config(struct sk_buff *skb,
            nla_put_u32(msg, NL80211_MESHCONF_POWER_MODE,
                        cur_params.power_mode) ||
            nla_put_u16(msg, NL80211_MESHCONF_AWAKE_WINDOW,
-                       cur_params.dot11MeshAwakeWindowDuration))
+                       cur_params.dot11MeshAwakeWindowDuration) ||
+           nla_put_u32(msg, NL80211_MESHCONF_PLINK_TIMEOUT,
+                       cur_params.plink_timeout))
                goto nla_put_failure;
        nla_nest_end(msg, pinfoattr);
        genlmsg_end(msg, hdr);
@@ -4633,6 +4656,7 @@ static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_A
        [NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL] = { .type = NLA_U16 },
        [NL80211_MESHCONF_POWER_MODE] = { .type = NLA_U32 },
        [NL80211_MESHCONF_AWAKE_WINDOW] = { .type = NLA_U16 },
+       [NL80211_MESHCONF_PLINK_TIMEOUT] = { .type = NLA_U32 },
 };
 
 static const struct nla_policy
@@ -4641,6 +4665,7 @@ static const struct nla_policy
        [NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL] = { .type = NLA_U8 },
        [NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC] = { .type = NLA_U8 },
        [NL80211_MESH_SETUP_USERSPACE_AUTH] = { .type = NLA_FLAG },
+       [NL80211_MESH_SETUP_AUTH_PROTOCOL] = { .type = NLA_U8 },
        [NL80211_MESH_SETUP_USERSPACE_MPM] = { .type = NLA_FLAG },
        [NL80211_MESH_SETUP_IE] = { .type = NLA_BINARY,
                                    .len = IEEE80211_MAX_DATA_LEN },
@@ -4769,6 +4794,9 @@ do {                                                                          \
        FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshAwakeWindowDuration,
                                  0, 65535, mask,
                                  NL80211_MESHCONF_AWAKE_WINDOW, nla_get_u16);
+       FILL_IN_MESH_PARAM_IF_SET(tb, cfg, plink_timeout, 1, 0xffffffff,
+                                 mask, NL80211_MESHCONF_PLINK_TIMEOUT,
+                                 nla_get_u32);
        if (mask_out)
                *mask_out = mask;
 
@@ -4826,6 +4854,13 @@ static int nl80211_parse_mesh_setup(struct genl_info *info,
        if (setup->is_secure)
                setup->user_mpm = true;
 
+       if (tb[NL80211_MESH_SETUP_AUTH_PROTOCOL]) {
+               if (!setup->user_mpm)
+                       return -EINVAL;
+               setup->auth_id =
+                       nla_get_u8(tb[NL80211_MESH_SETUP_AUTH_PROTOCOL]);
+       }
+
        return 0;
 }
 
@@ -4868,18 +4903,13 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
        void *hdr = NULL;
        struct nlattr *nl_reg_rules;
        unsigned int i;
-       int err = -EINVAL;
-
-       mutex_lock(&cfg80211_mutex);
 
        if (!cfg80211_regdomain)
-               goto out;
+               return -EINVAL;
 
        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-       if (!msg) {
-               err = -ENOBUFS;
-               goto out;
-       }
+       if (!msg)
+               return -ENOBUFS;
 
        hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
                             NL80211_CMD_GET_REG);
@@ -4938,8 +4968,7 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
        nla_nest_end(msg, nl_reg_rules);
 
        genlmsg_end(msg, hdr);
-       err = genlmsg_reply(msg, info);
-       goto out;
+       return genlmsg_reply(msg, info);
 
 nla_put_failure_rcu:
        rcu_read_unlock();
@@ -4947,10 +4976,7 @@ nla_put_failure:
        genlmsg_cancel(msg, hdr);
 put_failure:
        nlmsg_free(msg);
-       err = -EMSGSIZE;
-out:
-       mutex_unlock(&cfg80211_mutex);
-       return err;
+       return -EMSGSIZE;
 }
 
 static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
@@ -5016,12 +5042,9 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
                }
        }
 
-       mutex_lock(&cfg80211_mutex);
-
        r = set_regdom(rd);
        /* set_regdom took ownership */
        rd = NULL;
-       mutex_unlock(&cfg80211_mutex);
 
  bad_reg:
        kfree(rd);
@@ -5071,7 +5094,6 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
        if (!rdev->ops->scan)
                return -EOPNOTSUPP;
 
-       mutex_lock(&rdev->sched_scan_mtx);
        if (rdev->scan_req) {
                err = -EBUSY;
                goto unlock;
@@ -5257,7 +5279,6 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
        }
 
  unlock:
-       mutex_unlock(&rdev->sched_scan_mtx);
        return err;
 }
 
@@ -5329,8 +5350,6 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
        if (ie_len > wiphy->max_sched_scan_ie_len)
                return -EINVAL;
 
-       mutex_lock(&rdev->sched_scan_mtx);
-
        if (rdev->sched_scan_req) {
                err = -EINPROGRESS;
                goto out;
@@ -5498,7 +5517,6 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
 out_free:
        kfree(request);
 out:
-       mutex_unlock(&rdev->sched_scan_mtx);
        return err;
 }
 
@@ -5506,17 +5524,12 @@ static int nl80211_stop_sched_scan(struct sk_buff *skb,
                                   struct genl_info *info)
 {
        struct cfg80211_registered_device *rdev = info->user_ptr[0];
-       int err;
 
        if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) ||
            !rdev->ops->sched_scan_stop)
                return -EOPNOTSUPP;
 
-       mutex_lock(&rdev->sched_scan_mtx);
-       err = __cfg80211_stop_sched_scan(rdev, false);
-       mutex_unlock(&rdev->sched_scan_mtx);
-
-       return err;
+       return __cfg80211_stop_sched_scan(rdev, false);
 }
 
 static int nl80211_start_radar_detection(struct sk_buff *skb,
@@ -5548,12 +5561,11 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
        if (!rdev->ops->start_radar_detection)
                return -EOPNOTSUPP;
 
-       mutex_lock(&rdev->devlist_mtx);
        err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
                                           chandef.chan, CHAN_MODE_SHARED,
                                           BIT(chandef.width));
        if (err)
-               goto err_locked;
+               return err;
 
        err = rdev->ops->start_radar_detection(&rdev->wiphy, dev, &chandef);
        if (!err) {
@@ -5561,9 +5573,6 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
                wdev->cac_started = true;
                wdev->cac_start_time = jiffies;
        }
-err_locked:
-       mutex_unlock(&rdev->devlist_mtx);
-
        return err;
 }
 
@@ -5946,10 +5955,13 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
        if (local_state_change)
                return 0;
 
-       return cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid,
-                                 ssid, ssid_len, ie, ie_len,
-                                 key.p.key, key.p.key_len, key.idx,
-                                 sae_data, sae_data_len);
+       wdev_lock(dev->ieee80211_ptr);
+       err = cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid,
+                                ssid, ssid_len, ie, ie_len,
+                                key.p.key, key.p.key_len, key.idx,
+                                sae_data, sae_data_len);
+       wdev_unlock(dev->ieee80211_ptr);
+       return err;
 }
 
 static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
@@ -6116,9 +6128,12 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
        }
 
        err = nl80211_crypto_settings(rdev, info, &req.crypto, 1);
-       if (!err)
+       if (!err) {
+               wdev_lock(dev->ieee80211_ptr);
                err = cfg80211_mlme_assoc(rdev, dev, chan, bssid,
                                          ssid, ssid_len, &req);
+               wdev_unlock(dev->ieee80211_ptr);
+       }
 
        return err;
 }
@@ -6128,7 +6143,7 @@ static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info)
        struct cfg80211_registered_device *rdev = info->user_ptr[0];
        struct net_device *dev = info->user_ptr[1];
        const u8 *ie = NULL, *bssid;
-       int ie_len = 0;
+       int ie_len = 0, err;
        u16 reason_code;
        bool local_state_change;
 
@@ -6163,8 +6178,11 @@ static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info)
 
        local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE];
 
-       return cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason_code,
-                                   local_state_change);
+       wdev_lock(dev->ieee80211_ptr);
+       err = cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason_code,
+                                  local_state_change);
+       wdev_unlock(dev->ieee80211_ptr);
+       return err;
 }
 
 static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info)
@@ -6172,7 +6190,7 @@ static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info)
        struct cfg80211_registered_device *rdev = info->user_ptr[0];
        struct net_device *dev = info->user_ptr[1];
        const u8 *ie = NULL, *bssid;
-       int ie_len = 0;
+       int ie_len = 0, err;
        u16 reason_code;
        bool local_state_change;
 
@@ -6207,8 +6225,11 @@ static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info)
 
        local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE];
 
-       return cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason_code,
-                                     local_state_change);
+       wdev_lock(dev->ieee80211_ptr);
+       err = cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason_code,
+                                    local_state_change);
+       wdev_unlock(dev->ieee80211_ptr);
+       return err;
 }
 
 static bool
@@ -6295,11 +6316,16 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
        if (!cfg80211_reg_can_beacon(&rdev->wiphy, &ibss.chandef))
                return -EINVAL;
 
-       if (ibss.chandef.width > NL80211_CHAN_WIDTH_40)
-               return -EINVAL;
-       if (ibss.chandef.width != NL80211_CHAN_WIDTH_20_NOHT &&
-           !(rdev->wiphy.features & NL80211_FEATURE_HT_IBSS))
+       switch (ibss.chandef.width) {
+       case NL80211_CHAN_WIDTH_20_NOHT:
+               break;
+       case NL80211_CHAN_WIDTH_20:
+       case NL80211_CHAN_WIDTH_40:
+               if (rdev->wiphy.features & NL80211_FEATURE_HT_IBSS)
+                       break;
+       default:
                return -EINVAL;
+       }
 
        ibss.channel_fixed = !!info->attrs[NL80211_ATTR_FREQ_FIXED];
        ibss.privacy = !!info->attrs[NL80211_ATTR_PRIVACY];
@@ -6426,6 +6452,8 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
        void *data = NULL;
        int data_len = 0;
 
+       rtnl_lock();
+
        if (cb->args[0]) {
                /*
                 * 0 is a valid index, but not valid for args[0],
@@ -6437,18 +6465,16 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
                                  nl80211_fam.attrbuf, nl80211_fam.maxattr,
                                  nl80211_policy);
                if (err)
-                       return err;
+                       goto out_err;
 
-               mutex_lock(&cfg80211_mutex);
                rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk),
                                                  nl80211_fam.attrbuf);
                if (IS_ERR(rdev)) {
-                       mutex_unlock(&cfg80211_mutex);
-                       return PTR_ERR(rdev);
+                       err = PTR_ERR(rdev);
+                       goto out_err;
                }
                phy_idx = rdev->wiphy_idx;
                rdev = NULL;
-               mutex_unlock(&cfg80211_mutex);
 
                if (nl80211_fam.attrbuf[NL80211_ATTR_TESTDATA])
                        cb->args[1] =
@@ -6460,14 +6486,11 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
                data_len = nla_len((void *)cb->args[1]);
        }
 
-       mutex_lock(&cfg80211_mutex);
        rdev = cfg80211_rdev_by_wiphy_idx(phy_idx);
        if (!rdev) {
-               mutex_unlock(&cfg80211_mutex);
-               return -ENOENT;
+               err = -ENOENT;
+               goto out_err;
        }
-       cfg80211_lock_rdev(rdev);
-       mutex_unlock(&cfg80211_mutex);
 
        if (!rdev->ops->testmode_dump) {
                err = -EOPNOTSUPP;
@@ -6508,7 +6531,7 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
        /* see above */
        cb->args[0] = phy_idx + 1;
  out_err:
-       cfg80211_unlock_rdev(rdev);
+       rtnl_unlock();
        return err;
 }
 
@@ -6716,7 +6739,9 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
                       sizeof(connect.vht_capa));
        }
 
-       err = cfg80211_connect(rdev, dev, &connect, connkeys);
+       wdev_lock(dev->ieee80211_ptr);
+       err = cfg80211_connect(rdev, dev, &connect, connkeys, NULL);
+       wdev_unlock(dev->ieee80211_ptr);
        if (err)
                kfree(connkeys);
        return err;
@@ -6727,6 +6752,7 @@ static int nl80211_disconnect(struct sk_buff *skb, struct genl_info *info)
        struct cfg80211_registered_device *rdev = info->user_ptr[0];
        struct net_device *dev = info->user_ptr[1];
        u16 reason;
+       int ret;
 
        if (!info->attrs[NL80211_ATTR_REASON_CODE])
                reason = WLAN_REASON_DEAUTH_LEAVING;
@@ -6740,7 +6766,10 @@ static int nl80211_disconnect(struct sk_buff *skb, struct genl_info *info)
            dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT)
                return -EOPNOTSUPP;
 
-       return cfg80211_disconnect(rdev, dev, reason, true);
+       wdev_lock(dev->ieee80211_ptr);
+       ret = cfg80211_disconnect(rdev, dev, reason, true);
+       wdev_unlock(dev->ieee80211_ptr);
+       return ret;
 }
 
 static int nl80211_wiphy_netns(struct sk_buff *skb, struct genl_info *info)
@@ -7159,6 +7188,9 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
                return -EOPNOTSUPP;
 
        switch (wdev->iftype) {
+       case NL80211_IFTYPE_P2P_DEVICE:
+               if (!info->attrs[NL80211_ATTR_WIPHY_FREQ])
+                       return -EINVAL;
        case NL80211_IFTYPE_STATION:
        case NL80211_IFTYPE_ADHOC:
        case NL80211_IFTYPE_P2P_CLIENT:
@@ -7166,7 +7198,6 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
        case NL80211_IFTYPE_AP_VLAN:
        case NL80211_IFTYPE_MESH_POINT:
        case NL80211_IFTYPE_P2P_GO:
-       case NL80211_IFTYPE_P2P_DEVICE:
                break;
        default:
                return -EOPNOTSUPP;
@@ -7194,9 +7225,18 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
 
        no_cck = nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]);
 
-       err = nl80211_parse_chandef(rdev, info, &chandef);
-       if (err)
-               return err;
+       /* get the channel if any has been specified, otherwise pass NULL to
+        * the driver. The latter will use the current one
+        */
+       chandef.chan = NULL;
+       if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
+               err = nl80211_parse_chandef(rdev, info, &chandef);
+               if (err)
+                       return err;
+       }
+
+       if (!chandef.chan && offchan)
+               return -EINVAL;
 
        if (!dont_wait_for_ack) {
                msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
@@ -7501,6 +7541,23 @@ static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info)
                setup.chandef.chan = NULL;
        }
 
+       if (info->attrs[NL80211_ATTR_BSS_BASIC_RATES]) {
+               u8 *rates = nla_data(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]);
+               int n_rates =
+                       nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]);
+               struct ieee80211_supported_band *sband;
+
+               if (!setup.chandef.chan)
+                       return -EINVAL;
+
+               sband = rdev->wiphy.bands[setup.chandef.chan->band];
+
+               err = ieee80211_get_ratemask(sband, rates, n_rates,
+                                            &setup.basic_rates);
+               if (err)
+                       return err;
+       }
+
        return cfg80211_join_mesh(rdev, dev, &setup, &cfg);
 }
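
The NL80211_ATTR_BSS_BASIC_RATES handling added above passes the userspace-supplied rate list through ieee80211_get_ratemask() to obtain a bitmask over the band's supported rates. The standalone sketch below shows that list-to-bitmask mapping under stated assumptions: the rate table and function names are hypothetical, and requested rates are assumed to use the 802.11 IE encoding (units of 500 kbit/s, basic-rate bit masked off), while the supported table is in units of 100 kbit/s.

/* Hypothetical sketch of turning a requested rate list into a bitmask over a
 * supported-rate table; names and tables are illustrative only. */
#include <stdio.h>

/* Supported rates in units of 100 kbit/s, like ieee80211_rate.bitrate. */
static const int supported[] = { 10, 20, 55, 110, 60, 90, 120, 180 };
#define N_SUPPORTED ((int)(sizeof(supported) / sizeof(supported[0])))

/* Fill *mask with one bit per matched supported rate; -1 if any rate is unknown. */
static int get_ratemask(const unsigned char *rates, int n_rates, unsigned int *mask)
{
	*mask = 0;
	for (int i = 0; i < n_rates; i++) {
		int rate = (rates[i] & 0x7f) * 5;   /* IE units of 500 kbit/s -> 100 kbit/s */
		int found = 0;

		for (int j = 0; j < N_SUPPORTED; j++) {
			if (supported[j] == rate) {
				*mask |= 1u << j;
				found = 1;
				break;
			}
		}
		if (!found)
			return -1;
	}
	return 0;
}

int main(void)
{
	unsigned char req[] = { 2, 4, 11 };     /* 1, 2 and 5.5 Mbit/s in IE units */
	unsigned int mask;

	if (get_ratemask(req, 3, &mask) == 0)
		printf("basic_rates mask: 0x%x\n", mask);
	return 0;
}
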
 
@@ -7516,28 +7573,29 @@ static int nl80211_leave_mesh(struct sk_buff *skb, struct genl_info *info)
 static int nl80211_send_wowlan_patterns(struct sk_buff *msg,
                                        struct cfg80211_registered_device *rdev)
 {
+       struct cfg80211_wowlan *wowlan = rdev->wiphy.wowlan_config;
        struct nlattr *nl_pats, *nl_pat;
        int i, pat_len;
 
-       if (!rdev->wowlan->n_patterns)
+       if (!wowlan->n_patterns)
                return 0;
 
        nl_pats = nla_nest_start(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN);
        if (!nl_pats)
                return -ENOBUFS;
 
-       for (i = 0; i < rdev->wowlan->n_patterns; i++) {
+       for (i = 0; i < wowlan->n_patterns; i++) {
                nl_pat = nla_nest_start(msg, i + 1);
                if (!nl_pat)
                        return -ENOBUFS;
-               pat_len = rdev->wowlan->patterns[i].pattern_len;
+               pat_len = wowlan->patterns[i].pattern_len;
                if (nla_put(msg, NL80211_WOWLAN_PKTPAT_MASK,
                            DIV_ROUND_UP(pat_len, 8),
-                           rdev->wowlan->patterns[i].mask) ||
+                           wowlan->patterns[i].mask) ||
                    nla_put(msg, NL80211_WOWLAN_PKTPAT_PATTERN,
-                           pat_len, rdev->wowlan->patterns[i].pattern) ||
+                           pat_len, wowlan->patterns[i].pattern) ||
                    nla_put_u32(msg, NL80211_WOWLAN_PKTPAT_OFFSET,
-                               rdev->wowlan->patterns[i].pkt_offset))
+                               wowlan->patterns[i].pkt_offset))
                        return -ENOBUFS;
                nla_nest_end(msg, nl_pat);
        }
@@ -7596,16 +7654,15 @@ static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info)
        void *hdr;
        u32 size = NLMSG_DEFAULT_SIZE;
 
-       if (!rdev->wiphy.wowlan.flags && !rdev->wiphy.wowlan.n_patterns &&
-           !rdev->wiphy.wowlan.tcp)
+       if (!rdev->wiphy.wowlan)
                return -EOPNOTSUPP;
 
-       if (rdev->wowlan && rdev->wowlan->tcp) {
+       if (rdev->wiphy.wowlan_config && rdev->wiphy.wowlan_config->tcp) {
                /* adjust size to have room for all the data */
-               size += rdev->wowlan->tcp->tokens_size +
-                       rdev->wowlan->tcp->payload_len +
-                       rdev->wowlan->tcp->wake_len +
-                       rdev->wowlan->tcp->wake_len / 8;
+               size += rdev->wiphy.wowlan_config->tcp->tokens_size +
+                       rdev->wiphy.wowlan_config->tcp->payload_len +
+                       rdev->wiphy.wowlan_config->tcp->wake_len +
+                       rdev->wiphy.wowlan_config->tcp->wake_len / 8;
        }
 
        msg = nlmsg_new(size, GFP_KERNEL);
@@ -7617,33 +7674,34 @@ static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info)
        if (!hdr)
                goto nla_put_failure;
 
-       if (rdev->wowlan) {
+       if (rdev->wiphy.wowlan_config) {
                struct nlattr *nl_wowlan;
 
                nl_wowlan = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS);
                if (!nl_wowlan)
                        goto nla_put_failure;
 
-               if ((rdev->wowlan->any &&
+               if ((rdev->wiphy.wowlan_config->any &&
                     nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) ||
-                   (rdev->wowlan->disconnect &&
+                   (rdev->wiphy.wowlan_config->disconnect &&
                     nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) ||
-                   (rdev->wowlan->magic_pkt &&
+                   (rdev->wiphy.wowlan_config->magic_pkt &&
                     nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) ||
-                   (rdev->wowlan->gtk_rekey_failure &&
+                   (rdev->wiphy.wowlan_config->gtk_rekey_failure &&
                     nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) ||
-                   (rdev->wowlan->eap_identity_req &&
+                   (rdev->wiphy.wowlan_config->eap_identity_req &&
                     nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) ||
-                   (rdev->wowlan->four_way_handshake &&
+                   (rdev->wiphy.wowlan_config->four_way_handshake &&
                     nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) ||
-                   (rdev->wowlan->rfkill_release &&
+                   (rdev->wiphy.wowlan_config->rfkill_release &&
                     nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE)))
                        goto nla_put_failure;
 
                if (nl80211_send_wowlan_patterns(msg, rdev))
                        goto nla_put_failure;
 
-               if (nl80211_send_wowlan_tcp(msg, rdev->wowlan->tcp))
+               if (nl80211_send_wowlan_tcp(msg,
+                                           rdev->wiphy.wowlan_config->tcp))
                        goto nla_put_failure;
 
                nla_nest_end(msg, nl_wowlan);
@@ -7669,7 +7727,7 @@ static int nl80211_parse_wowlan_tcp(struct cfg80211_registered_device *rdev,
        u32 data_size, wake_size, tokens_size = 0, wake_mask_size;
        int err, port;
 
-       if (!rdev->wiphy.wowlan.tcp)
+       if (!rdev->wiphy.wowlan->tcp)
                return -EINVAL;
 
        err = nla_parse(tb, MAX_NL80211_WOWLAN_TCP,
@@ -7689,16 +7747,16 @@ static int nl80211_parse_wowlan_tcp(struct cfg80211_registered_device *rdev,
                return -EINVAL;
 
        data_size = nla_len(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD]);
-       if (data_size > rdev->wiphy.wowlan.tcp->data_payload_max)
+       if (data_size > rdev->wiphy.wowlan->tcp->data_payload_max)
                return -EINVAL;
 
        if (nla_get_u32(tb[NL80211_WOWLAN_TCP_DATA_INTERVAL]) >
-                       rdev->wiphy.wowlan.tcp->data_interval_max ||
+                       rdev->wiphy.wowlan->tcp->data_interval_max ||
            nla_get_u32(tb[NL80211_WOWLAN_TCP_DATA_INTERVAL]) == 0)
                return -EINVAL;
 
        wake_size = nla_len(tb[NL80211_WOWLAN_TCP_WAKE_PAYLOAD]);
-       if (wake_size > rdev->wiphy.wowlan.tcp->wake_payload_max)
+       if (wake_size > rdev->wiphy.wowlan->tcp->wake_payload_max)
                return -EINVAL;
 
        wake_mask_size = nla_len(tb[NL80211_WOWLAN_TCP_WAKE_MASK]);
@@ -7713,13 +7771,13 @@ static int nl80211_parse_wowlan_tcp(struct cfg80211_registered_device *rdev,
 
                if (!tok->len || tokens_size % tok->len)
                        return -EINVAL;
-               if (!rdev->wiphy.wowlan.tcp->tok)
+               if (!rdev->wiphy.wowlan->tcp->tok)
                        return -EINVAL;
-               if (tok->len > rdev->wiphy.wowlan.tcp->tok->max_len)
+               if (tok->len > rdev->wiphy.wowlan->tcp->tok->max_len)
                        return -EINVAL;
-               if (tok->len < rdev->wiphy.wowlan.tcp->tok->min_len)
+               if (tok->len < rdev->wiphy.wowlan->tcp->tok->min_len)
                        return -EINVAL;
-               if (tokens_size > rdev->wiphy.wowlan.tcp->tok->bufsize)
+               if (tokens_size > rdev->wiphy.wowlan->tcp->tok->bufsize)
                        return -EINVAL;
                if (tok->offset + tok->len > data_size)
                        return -EINVAL;
@@ -7727,7 +7785,7 @@ static int nl80211_parse_wowlan_tcp(struct cfg80211_registered_device *rdev,
 
        if (tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ]) {
                seq = nla_data(tb[NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ]);
-               if (!rdev->wiphy.wowlan.tcp->seq)
+               if (!rdev->wiphy.wowlan->tcp->seq)
                        return -EINVAL;
                if (seq->len == 0 || seq->len > 4)
                        return -EINVAL;
@@ -7808,17 +7866,16 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
        struct nlattr *tb[NUM_NL80211_WOWLAN_TRIG];
        struct cfg80211_wowlan new_triggers = {};
        struct cfg80211_wowlan *ntrig;
-       struct wiphy_wowlan_support *wowlan = &rdev->wiphy.wowlan;
+       const struct wiphy_wowlan_support *wowlan = rdev->wiphy.wowlan;
        int err, i;
-       bool prev_enabled = rdev->wowlan;
+       bool prev_enabled = rdev->wiphy.wowlan_config;
 
-       if (!rdev->wiphy.wowlan.flags && !rdev->wiphy.wowlan.n_patterns &&
-           !rdev->wiphy.wowlan.tcp)
+       if (!wowlan)
                return -EOPNOTSUPP;
 
        if (!info->attrs[NL80211_ATTR_WOWLAN_TRIGGERS]) {
                cfg80211_rdev_free_wowlan(rdev);
-               rdev->wowlan = NULL;
+               rdev->wiphy.wowlan_config = NULL;
                goto set_wakeup;
        }
 
@@ -7954,11 +8011,12 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
                goto error;
        }
        cfg80211_rdev_free_wowlan(rdev);
-       rdev->wowlan = ntrig;
+       rdev->wiphy.wowlan_config = ntrig;
 
  set_wakeup:
-       if (rdev->ops->set_wakeup && prev_enabled != !!rdev->wowlan)
-               rdev_set_wakeup(rdev, rdev->wowlan);
+       if (rdev->ops->set_wakeup &&
+           prev_enabled != !!rdev->wiphy.wowlan_config)
+               rdev_set_wakeup(rdev, rdev->wiphy.wowlan_config);
 
        return 0;
  error:
@@ -8143,9 +8201,7 @@ static int nl80211_start_p2p_device(struct sk_buff *skb, struct genl_info *info)
        if (wdev->p2p_started)
                return 0;
 
-       mutex_lock(&rdev->devlist_mtx);
        err = cfg80211_can_add_interface(rdev, wdev->iftype);
-       mutex_unlock(&rdev->devlist_mtx);
        if (err)
                return err;
 
@@ -8154,9 +8210,7 @@ static int nl80211_start_p2p_device(struct sk_buff *skb, struct genl_info *info)
                return err;
 
        wdev->p2p_started = true;
-       mutex_lock(&rdev->devlist_mtx);
        rdev->opencount++;
-       mutex_unlock(&rdev->devlist_mtx);
 
        return 0;
 }
@@ -8172,11 +8226,7 @@ static int nl80211_stop_p2p_device(struct sk_buff *skb, struct genl_info *info)
        if (!rdev->ops->stop_p2p_device)
                return -EOPNOTSUPP;
 
-       mutex_lock(&rdev->devlist_mtx);
-       mutex_lock(&rdev->sched_scan_mtx);
        cfg80211_stop_p2p_device(rdev, wdev);
-       mutex_unlock(&rdev->sched_scan_mtx);
-       mutex_unlock(&rdev->devlist_mtx);
 
        return 0;
 }
@@ -8319,11 +8369,11 @@ static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb,
                info->user_ptr[0] = rdev;
        } else if (ops->internal_flags & NL80211_FLAG_NEED_NETDEV ||
                   ops->internal_flags & NL80211_FLAG_NEED_WDEV) {
-               mutex_lock(&cfg80211_mutex);
+               ASSERT_RTNL();
+
                wdev = __cfg80211_wdev_from_attrs(genl_info_net(info),
                                                  info->attrs);
                if (IS_ERR(wdev)) {
-                       mutex_unlock(&cfg80211_mutex);
                        if (rtnl)
                                rtnl_unlock();
                        return PTR_ERR(wdev);
@@ -8334,7 +8384,6 @@ static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb,
 
                if (ops->internal_flags & NL80211_FLAG_NEED_NETDEV) {
                        if (!dev) {
-                               mutex_unlock(&cfg80211_mutex);
                                if (rtnl)
                                        rtnl_unlock();
                                return -EINVAL;
@@ -8348,7 +8397,6 @@ static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb,
                if (dev) {
                        if (ops->internal_flags & NL80211_FLAG_CHECK_NETDEV_UP &&
                            !netif_running(dev)) {
-                               mutex_unlock(&cfg80211_mutex);
                                if (rtnl)
                                        rtnl_unlock();
                                return -ENETDOWN;
@@ -8357,17 +8405,12 @@ static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb,
                        dev_hold(dev);
                } else if (ops->internal_flags & NL80211_FLAG_CHECK_NETDEV_UP) {
                        if (!wdev->p2p_started) {
-                               mutex_unlock(&cfg80211_mutex);
                                if (rtnl)
                                        rtnl_unlock();
                                return -ENETDOWN;
                        }
                }
 
-               cfg80211_lock_rdev(rdev);
-
-               mutex_unlock(&cfg80211_mutex);
-
                info->user_ptr[0] = rdev;
        }
 
@@ -8377,8 +8420,6 @@ static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb,
 static void nl80211_post_doit(struct genl_ops *ops, struct sk_buff *skb,
                              struct genl_info *info)
 {
-       if (info->user_ptr[0])
-               cfg80211_unlock_rdev(info->user_ptr[0]);
        if (info->user_ptr[1]) {
                if (ops->internal_flags & NL80211_FLAG_NEED_WDEV) {
                        struct wireless_dev *wdev = info->user_ptr[1];
@@ -8398,9 +8439,11 @@ static struct genl_ops nl80211_ops[] = {
                .cmd = NL80211_CMD_GET_WIPHY,
                .doit = nl80211_get_wiphy,
                .dumpit = nl80211_dump_wiphy,
+               .done = nl80211_dump_wiphy_done,
                .policy = nl80211_policy,
                /* can be retrieved by unprivileged users */
-               .internal_flags = NL80211_FLAG_NEED_WIPHY,
+               .internal_flags = NL80211_FLAG_NEED_WIPHY |
+                                 NL80211_FLAG_NEED_RTNL,
        },
        {
                .cmd = NL80211_CMD_SET_WIPHY,
@@ -8415,7 +8458,8 @@ static struct genl_ops nl80211_ops[] = {
                .dumpit = nl80211_dump_interface,
                .policy = nl80211_policy,
                /* can be retrieved by unprivileged users */
-               .internal_flags = NL80211_FLAG_NEED_WDEV,
+               .internal_flags = NL80211_FLAG_NEED_WDEV |
+                                 NL80211_FLAG_NEED_RTNL,
        },
        {
                .cmd = NL80211_CMD_SET_INTERFACE,
@@ -8574,6 +8618,7 @@ static struct genl_ops nl80211_ops[] = {
                .cmd = NL80211_CMD_GET_REG,
                .doit = nl80211_get_reg,
                .policy = nl80211_policy,
+               .internal_flags = NL80211_FLAG_NEED_RTNL,
                /* can be retrieved by unprivileged users */
        },
        {
@@ -8581,6 +8626,7 @@ static struct genl_ops nl80211_ops[] = {
                .doit = nl80211_set_reg,
                .policy = nl80211_policy,
                .flags = GENL_ADMIN_PERM,
+               .internal_flags = NL80211_FLAG_NEED_RTNL,
        },
        {
                .cmd = NL80211_CMD_REQ_SET_REG,
@@ -9014,13 +9060,13 @@ static struct genl_multicast_group nl80211_regulatory_mcgrp = {
 void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev)
 {
        struct sk_buff *msg;
+       struct nl80211_dump_wiphy_state state = {};
 
        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return;
 
-       if (nl80211_send_wiphy(rdev, msg, 0, 0, 0,
-                              false, NULL, NULL, NULL) < 0) {
+       if (nl80211_send_wiphy(rdev, msg, 0, 0, 0, &state) < 0) {
                nlmsg_free(msg);
                return;
        }
@@ -9036,8 +9082,6 @@ static int nl80211_add_scan_req(struct sk_buff *msg,
        struct nlattr *nest;
        int i;
 
-       lockdep_assert_held(&rdev->sched_scan_mtx);
-
        if (WARN_ON(!req))
                return 0;
 
@@ -9344,31 +9388,27 @@ void nl80211_send_disassoc(struct cfg80211_registered_device *rdev,
                                NL80211_CMD_DISASSOCIATE, gfp);
 }
 
-void cfg80211_send_unprot_deauth(struct net_device *dev, const u8 *buf,
-                                size_t len)
+void cfg80211_rx_unprot_mlme_mgmt(struct net_device *dev, const u8 *buf,
+                                 size_t len)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct wiphy *wiphy = wdev->wiphy;
        struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       const struct ieee80211_mgmt *mgmt = (void *)buf;
+       u32 cmd;
 
-       trace_cfg80211_send_unprot_deauth(dev);
-       nl80211_send_mlme_event(rdev, dev, buf, len,
-                               NL80211_CMD_UNPROT_DEAUTHENTICATE, GFP_ATOMIC);
-}
-EXPORT_SYMBOL(cfg80211_send_unprot_deauth);
+       if (WARN_ON(len < 2))
+               return;
 
-void cfg80211_send_unprot_disassoc(struct net_device *dev, const u8 *buf,
-                                  size_t len)
-{
-       struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       if (ieee80211_is_deauth(mgmt->frame_control))
+               cmd = NL80211_CMD_UNPROT_DEAUTHENTICATE;
+       else
+               cmd = NL80211_CMD_UNPROT_DISASSOCIATE;
 
-       trace_cfg80211_send_unprot_disassoc(dev);
-       nl80211_send_mlme_event(rdev, dev, buf, len,
-                               NL80211_CMD_UNPROT_DISASSOCIATE, GFP_ATOMIC);
+       trace_cfg80211_rx_unprot_mlme_mgmt(dev, buf, len);
+       nl80211_send_mlme_event(rdev, dev, buf, len, cmd, GFP_ATOMIC);
 }
-EXPORT_SYMBOL(cfg80211_send_unprot_disassoc);
+EXPORT_SYMBOL(cfg80211_rx_unprot_mlme_mgmt);
 
 static void nl80211_send_mlme_timeout(struct cfg80211_registered_device *rdev,
                                      struct net_device *netdev, int cmd,
@@ -9879,7 +9919,6 @@ static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd,
        struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
        struct sk_buff *msg;
        void *hdr;
-       int err;
        u32 nlportid = ACCESS_ONCE(wdev->ap_unexpected_nlportid);
 
        if (!nlportid)
@@ -9900,12 +9939,7 @@ static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd,
            nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr))
                goto nla_put_failure;
 
-       err = genlmsg_end(msg, hdr);
-       if (err < 0) {
-               nlmsg_free(msg);
-               return true;
-       }
-
+       genlmsg_end(msg, hdr);
        genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid);
        return true;
 
@@ -10348,10 +10382,7 @@ nl80211_radar_notify(struct cfg80211_registered_device *rdev,
        if (nl80211_send_chandef(msg, chandef))
                goto nla_put_failure;
 
-       if (genlmsg_end(msg, hdr) < 0) {
-               nlmsg_free(msg);
-               return;
-       }
+       genlmsg_end(msg, hdr);
 
        genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                                nl80211_mlme_mcgrp.id, gfp);
@@ -10417,7 +10448,6 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
        struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
        struct sk_buff *msg;
        void *hdr;
-       int err;
 
        trace_cfg80211_probe_status(dev, addr, cookie, acked);
 
@@ -10439,11 +10469,7 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
            (acked && nla_put_flag(msg, NL80211_ATTR_ACK)))
                goto nla_put_failure;
 
-       err = genlmsg_end(msg, hdr);
-       if (err < 0) {
-               nlmsg_free(msg);
-               return;
-       }
+       genlmsg_end(msg, hdr);
 
        genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                                nl80211_mlme_mcgrp.id, gfp);
@@ -10509,7 +10535,7 @@ void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev,
        struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
        struct sk_buff *msg;
        void *hdr;
-       int err, size = 200;
+       int size = 200;
 
        trace_cfg80211_report_wowlan_wakeup(wdev->wiphy, wdev, wakeup);
 
@@ -10595,9 +10621,7 @@ void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev,
                nla_nest_end(msg, reasons);
        }
 
-       err = genlmsg_end(msg, hdr);
-       if (err < 0)
-               goto free_msg;
+       genlmsg_end(msg, hdr);
 
        genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                                nl80211_mlme_mcgrp.id, gfp);
@@ -10617,7 +10641,6 @@ void cfg80211_tdls_oper_request(struct net_device *dev, const u8 *peer,
        struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
        struct sk_buff *msg;
        void *hdr;
-       int err;
 
        trace_cfg80211_tdls_oper_request(wdev->wiphy, dev, peer, oper,
                                         reason_code);
@@ -10640,11 +10663,7 @@ void cfg80211_tdls_oper_request(struct net_device *dev, const u8 *peer,
             nla_put_u16(msg, NL80211_ATTR_REASON_CODE, reason_code)))
                goto nla_put_failure;
 
-       err = genlmsg_end(msg, hdr);
-       if (err < 0) {
-               nlmsg_free(msg);
-               return;
-       }
+       genlmsg_end(msg, hdr);
 
        genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                                nl80211_mlme_mcgrp.id, gfp);
@@ -10702,7 +10721,6 @@ void cfg80211_ft_event(struct net_device *netdev,
        struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
        struct sk_buff *msg;
        void *hdr;
-       int err;
 
        trace_cfg80211_ft_event(wiphy, netdev, ft_event);
 
@@ -10728,11 +10746,7 @@ void cfg80211_ft_event(struct net_device *netdev,
                nla_put(msg, NL80211_ATTR_IE_RIC, ft_event->ric_ies_len,
                        ft_event->ric_ies);
 
-       err = genlmsg_end(msg, hdr);
-       if (err < 0) {
-               nlmsg_free(msg);
-               return;
-       }
+       genlmsg_end(msg, hdr);
 
        genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                                nl80211_mlme_mcgrp.id, GFP_KERNEL);
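
The nl80211.c hunks above replace the old cfg80211_mutex/rdev locking in nl80211_pre_doit()/nl80211_post_doit() with the RTNL: operations flagged NL80211_FLAG_NEED_RTNL run entirely under rtnl_lock(), so handlers only assert it. A minimal sketch of what a handler relies on after this change (nl80211_demo_doit is an illustrative name, not part of the patch):

/* Illustrative only; not from the patch above. */
static int nl80211_demo_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];

	/* pre_doit took the RTNL for NL80211_FLAG_NEED_RTNL ops and
	 * resolved rdev into user_ptr[0]; no per-rdev mutex is left. */
	ASSERT_RTNL();

	/* ... operate on rdev and its wdev list under RTNL ... */
	return 0;
}
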
index cc35fbaa45787ddeba99c0ea67ed4c5e317e7dd0..5a24c986f34be27e61613f0deb421a35efc61533 100644 (file)
@@ -81,7 +81,10 @@ static struct regulatory_request core_request_world = {
        .country_ie_env = ENVIRON_ANY,
 };
 
-/* Receipt of information from last regulatory request */
+/*
+ * Receipt of information from last regulatory request,
+ * protected by RTNL (and can be accessed with RCU protection)
+ */
 static struct regulatory_request __rcu *last_request =
        (void __rcu *)&core_request_world;
 
@@ -96,39 +99,25 @@ static struct device_type reg_device_type = {
  * Central wireless core regulatory domains, we only need two,
  * the current one and a world regulatory domain in case we have no
  * information to give us an alpha2.
+ * (protected by RTNL, can be read under RCU)
  */
 const struct ieee80211_regdomain __rcu *cfg80211_regdomain;
 
-/*
- * Protects static reg.c components:
- *     - cfg80211_regdomain (if not used with RCU)
- *     - cfg80211_world_regdom
- *     - last_request (if not used with RCU)
- *     - reg_num_devs_support_basehint
- */
-static DEFINE_MUTEX(reg_mutex);
-
 /*
  * Number of devices that registered to the core
  * that support cellular base station regulatory hints
+ * (protected by RTNL)
  */
 static int reg_num_devs_support_basehint;
 
-static inline void assert_reg_lock(void)
-{
-       lockdep_assert_held(&reg_mutex);
-}
-
 static const struct ieee80211_regdomain *get_cfg80211_regdom(void)
 {
-       return rcu_dereference_protected(cfg80211_regdomain,
-                                        lockdep_is_held(&reg_mutex));
+       return rtnl_dereference(cfg80211_regdomain);
 }
 
 static const struct ieee80211_regdomain *get_wiphy_regdom(struct wiphy *wiphy)
 {
-       return rcu_dereference_protected(wiphy->regd,
-                                        lockdep_is_held(&reg_mutex));
+       return rtnl_dereference(wiphy->regd);
 }
 
 static void rcu_free_regdom(const struct ieee80211_regdomain *r)
@@ -140,8 +129,7 @@ static void rcu_free_regdom(const struct ieee80211_regdomain *r)
 
 static struct regulatory_request *get_last_request(void)
 {
-       return rcu_dereference_check(last_request,
-                                    lockdep_is_held(&reg_mutex));
+       return rcu_dereference_rtnl(last_request);
 }
 
 /* Used to queue up regulatory hints */
@@ -200,6 +188,7 @@ static const struct ieee80211_regdomain world_regdom = {
        }
 };
 
+/* protected by RTNL */
 static const struct ieee80211_regdomain *cfg80211_world_regdom =
        &world_regdom;
 
@@ -215,7 +204,7 @@ static void reset_regdomains(bool full_reset,
        const struct ieee80211_regdomain *r;
        struct regulatory_request *lr;
 
-       assert_reg_lock();
+       ASSERT_RTNL();
 
        r = get_cfg80211_regdom();
 
@@ -377,7 +366,7 @@ static void reg_regdb_search(struct work_struct *work)
        const struct ieee80211_regdomain *curdom, *regdom = NULL;
        int i;
 
-       mutex_lock(&cfg80211_mutex);
+       rtnl_lock();
 
        mutex_lock(&reg_regdb_search_mutex);
        while (!list_empty(&reg_regdb_search_list)) {
@@ -402,7 +391,7 @@ static void reg_regdb_search(struct work_struct *work)
        if (!IS_ERR_OR_NULL(regdom))
                set_regdom(regdom);
 
-       mutex_unlock(&cfg80211_mutex);
+       rtnl_unlock();
 }
 
 static DECLARE_WORK(reg_regdb_work, reg_regdb_search);
@@ -936,13 +925,7 @@ static bool reg_request_cell_base(struct regulatory_request *request)
 
 bool reg_last_request_cell_base(void)
 {
-       bool val;
-
-       mutex_lock(&reg_mutex);
-       val = reg_request_cell_base(get_last_request());
-       mutex_unlock(&reg_mutex);
-
-       return val;
+       return reg_request_cell_base(get_last_request());
 }
 
 #ifdef CONFIG_CFG80211_CERTIFICATION_ONUS
@@ -1225,7 +1208,7 @@ static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator)
        struct cfg80211_registered_device *rdev;
        struct wiphy *wiphy;
 
-       assert_cfg80211_lock();
+       ASSERT_RTNL();
 
        list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
                wiphy = &rdev->wiphy;
@@ -1362,7 +1345,7 @@ get_reg_request_treatment(struct wiphy *wiphy,
                                return REG_REQ_OK;
                        return REG_REQ_ALREADY_SET;
                }
-               return 0;
+               return REG_REQ_OK;
        case NL80211_REGDOM_SET_BY_DRIVER:
                if (lr->initiator == NL80211_REGDOM_SET_BY_CORE) {
                        if (regdom_changes(pending_request->alpha2))
@@ -1444,8 +1427,6 @@ static void reg_set_request_processed(void)
  * what it believes should be the current regulatory domain.
  *
  * Returns one of the different reg request treatment values.
- *
- * Caller must hold &reg_mutex
  */
 static enum reg_request_treatment
 __regulatory_hint(struct wiphy *wiphy,
@@ -1570,21 +1551,19 @@ static void reg_process_pending_hints(void)
 {
        struct regulatory_request *reg_request, *lr;
 
-       mutex_lock(&cfg80211_mutex);
-       mutex_lock(&reg_mutex);
        lr = get_last_request();
 
        /* When last_request->processed becomes true this will be rescheduled */
        if (lr && !lr->processed) {
                REG_DBG_PRINT("Pending regulatory request, waiting for it to be processed...\n");
-               goto out;
+               return;
        }
 
        spin_lock(&reg_requests_lock);
 
        if (list_empty(&reg_requests_list)) {
                spin_unlock(&reg_requests_lock);
-               goto out;
+               return;
        }
 
        reg_request = list_first_entry(&reg_requests_list,
@@ -1595,10 +1574,6 @@ static void reg_process_pending_hints(void)
        spin_unlock(&reg_requests_lock);
 
        reg_process_hint(reg_request, reg_request->initiator);
-
-out:
-       mutex_unlock(&reg_mutex);
-       mutex_unlock(&cfg80211_mutex);
 }
 
 /* Processes beacon hints -- this has nothing to do with country IEs */
@@ -1607,9 +1582,6 @@ static void reg_process_pending_beacon_hints(void)
        struct cfg80211_registered_device *rdev;
        struct reg_beacon *pending_beacon, *tmp;
 
-       mutex_lock(&cfg80211_mutex);
-       mutex_lock(&reg_mutex);
-
        /* This goes through the _pending_ beacon list */
        spin_lock_bh(&reg_pending_beacons_lock);
 
@@ -1626,14 +1598,14 @@ static void reg_process_pending_beacon_hints(void)
        }
 
        spin_unlock_bh(&reg_pending_beacons_lock);
-       mutex_unlock(&reg_mutex);
-       mutex_unlock(&cfg80211_mutex);
 }
 
 static void reg_todo(struct work_struct *work)
 {
+       rtnl_lock();
        reg_process_pending_hints();
        reg_process_pending_beacon_hints();
+       rtnl_unlock();
 }
 
 static void queue_regulatory_request(struct regulatory_request *request)
@@ -1717,29 +1689,23 @@ int regulatory_hint(struct wiphy *wiphy, const char *alpha2)
 }
 EXPORT_SYMBOL(regulatory_hint);
 
-/*
- * We hold wdev_lock() here so we cannot hold cfg80211_mutex() and
- * therefore cannot iterate over the rdev list here.
- */
 void regulatory_hint_11d(struct wiphy *wiphy, enum ieee80211_band band,
                         const u8 *country_ie, u8 country_ie_len)
 {
        char alpha2[2];
        enum environment_cap env = ENVIRON_ANY;
-       struct regulatory_request *request, *lr;
-
-       mutex_lock(&reg_mutex);
-       lr = get_last_request();
-
-       if (unlikely(!lr))
-               goto out;
+       struct regulatory_request *request = NULL, *lr;
 
        /* IE len must be evenly divisible by 2 */
        if (country_ie_len & 0x01)
-               goto out;
+               return;
 
        if (country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN)
-               goto out;
+               return;
+
+       request = kzalloc(sizeof(*request), GFP_KERNEL);
+       if (!request)
+               return;
 
        alpha2[0] = country_ie[0];
        alpha2[1] = country_ie[1];
@@ -1749,19 +1715,21 @@ void regulatory_hint_11d(struct wiphy *wiphy, enum ieee80211_band band,
        else if (country_ie[2] == 'O')
                env = ENVIRON_OUTDOOR;
 
+       rcu_read_lock();
+       lr = get_last_request();
+
+       if (unlikely(!lr))
+               goto out;
+
        /*
         * We will run this only upon a successful connection on cfg80211.
        * We leave conflict resolution to the workqueue, where it can hold
-        * cfg80211_mutex.
+        * the RTNL.
         */
        if (lr->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
            lr->wiphy_idx != WIPHY_IDX_INVALID)
                goto out;
 
-       request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL);
-       if (!request)
-               goto out;
-
        request->wiphy_idx = get_wiphy_idx(wiphy);
        request->alpha2[0] = alpha2[0];
        request->alpha2[1] = alpha2[1];
@@ -1769,8 +1737,10 @@ void regulatory_hint_11d(struct wiphy *wiphy, enum ieee80211_band band,
        request->country_ie_env = env;
 
        queue_regulatory_request(request);
+       request = NULL;
 out:
-       mutex_unlock(&reg_mutex);
+       kfree(request);
+       rcu_read_unlock();
 }
 
 static void restore_alpha2(char *alpha2, bool reset_user)
@@ -1858,8 +1828,7 @@ static void restore_regulatory_settings(bool reset_user)
        LIST_HEAD(tmp_reg_req_list);
        struct cfg80211_registered_device *rdev;
 
-       mutex_lock(&cfg80211_mutex);
-       mutex_lock(&reg_mutex);
+       ASSERT_RTNL();
 
        reset_regdomains(true, &world_regdom);
        restore_alpha2(alpha2, reset_user);
@@ -1914,9 +1883,6 @@ static void restore_regulatory_settings(bool reset_user)
        list_splice_tail_init(&tmp_reg_req_list, &reg_requests_list);
        spin_unlock(&reg_requests_lock);
 
-       mutex_unlock(&reg_mutex);
-       mutex_unlock(&cfg80211_mutex);
-
        REG_DBG_PRINT("Kicking the queue\n");
 
        schedule_work(&reg_work);
@@ -2231,7 +2197,6 @@ int set_regdom(const struct ieee80211_regdomain *rd)
        struct regulatory_request *lr;
        int r;
 
-       mutex_lock(&reg_mutex);
        lr = get_last_request();
 
        /* Note that this doesn't update the wiphys, this is done below */
@@ -2241,14 +2206,12 @@ int set_regdom(const struct ieee80211_regdomain *rd)
                        reg_set_request_processed();
 
                kfree(rd);
-               goto out;
+               return r;
        }
 
        /* This would make this whole thing pointless */
-       if (WARN_ON(!lr->intersect && rd != get_cfg80211_regdom())) {
-               r = -EINVAL;
-               goto out;
-       }
+       if (WARN_ON(!lr->intersect && rd != get_cfg80211_regdom()))
+               return -EINVAL;
 
        /* update all wiphys now with the new established regulatory domain */
        update_all_wiphy_regulatory(lr->initiator);
@@ -2259,10 +2222,7 @@ int set_regdom(const struct ieee80211_regdomain *rd)
 
        reg_set_request_processed();
 
- out:
-       mutex_unlock(&reg_mutex);
-
-       return r;
+       return 0;
 }
 
 int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env)
@@ -2287,23 +2247,17 @@ int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env)
 
 void wiphy_regulatory_register(struct wiphy *wiphy)
 {
-       mutex_lock(&reg_mutex);
-
        if (!reg_dev_ignore_cell_hint(wiphy))
                reg_num_devs_support_basehint++;
 
        wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE);
-
-       mutex_unlock(&reg_mutex);
 }
 
-/* Caller must hold cfg80211_mutex */
 void wiphy_regulatory_deregister(struct wiphy *wiphy)
 {
        struct wiphy *request_wiphy = NULL;
        struct regulatory_request *lr;
 
-       mutex_lock(&reg_mutex);
        lr = get_last_request();
 
        if (!reg_dev_ignore_cell_hint(wiphy))
@@ -2316,12 +2270,10 @@ void wiphy_regulatory_deregister(struct wiphy *wiphy)
                request_wiphy = wiphy_idx_to_wiphy(lr->wiphy_idx);
 
        if (!request_wiphy || request_wiphy != wiphy)
-               goto out;
+               return;
 
        lr->wiphy_idx = WIPHY_IDX_INVALID;
        lr->country_ie_env = ENVIRON_ANY;
-out:
-       mutex_unlock(&reg_mutex);
 }
 
 static void reg_timeout_work(struct work_struct *work)
@@ -2385,9 +2337,9 @@ void regulatory_exit(void)
        cancel_delayed_work_sync(&reg_timeout);
 
        /* Lock to suppress warnings */
-       mutex_lock(&reg_mutex);
+       rtnl_lock();
        reset_regdomains(true, NULL);
-       mutex_unlock(&reg_mutex);
+       rtnl_unlock();
 
        dev_set_uevent_suppress(&reg_pdev->dev, true);
 
index fd99ea495b7e68cd9e5265542a6aa059d0ac7760..ae8c186b50d68510f88e758b959ad663e911a5de 100644 (file)
@@ -169,7 +169,7 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak)
        union iwreq_data wrqu;
 #endif
 
-       lockdep_assert_held(&rdev->sched_scan_mtx);
+       ASSERT_RTNL();
 
        request = rdev->scan_req;
 
@@ -230,9 +230,9 @@ void __cfg80211_scan_done(struct work_struct *wk)
        rdev = container_of(wk, struct cfg80211_registered_device,
                            scan_done_wk);
 
-       mutex_lock(&rdev->sched_scan_mtx);
+       rtnl_lock();
        ___cfg80211_scan_done(rdev, false);
-       mutex_unlock(&rdev->sched_scan_mtx);
+       rtnl_unlock();
 }
 
 void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted)
@@ -241,6 +241,7 @@ void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted)
        WARN_ON(request != wiphy_to_dev(request->wiphy)->scan_req);
 
        request->aborted = aborted;
+       request->notified = true;
        queue_work(cfg80211_wq, &wiphy_to_dev(request->wiphy)->scan_done_wk);
 }
 EXPORT_SYMBOL(cfg80211_scan_done);
@@ -255,7 +256,7 @@ void __cfg80211_sched_scan_results(struct work_struct *wk)
 
        request = rdev->sched_scan_req;
 
-       mutex_lock(&rdev->sched_scan_mtx);
+       rtnl_lock();
 
        /* we don't have sched_scan_req anymore if the scan is stopping */
        if (request) {
@@ -270,7 +271,7 @@ void __cfg80211_sched_scan_results(struct work_struct *wk)
                nl80211_send_sched_scan_results(rdev, request->dev);
        }
 
-       mutex_unlock(&rdev->sched_scan_mtx);
+       rtnl_unlock();
 }
 
 void cfg80211_sched_scan_results(struct wiphy *wiphy)
@@ -289,9 +290,9 @@ void cfg80211_sched_scan_stopped(struct wiphy *wiphy)
 
        trace_cfg80211_sched_scan_stopped(wiphy);
 
-       mutex_lock(&rdev->sched_scan_mtx);
+       rtnl_lock();
        __cfg80211_stop_sched_scan(rdev, true);
-       mutex_unlock(&rdev->sched_scan_mtx);
+       rtnl_unlock();
 }
 EXPORT_SYMBOL(cfg80211_sched_scan_stopped);
 
@@ -300,7 +301,7 @@ int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev,
 {
        struct net_device *dev;
 
-       lockdep_assert_held(&rdev->sched_scan_mtx);
+       ASSERT_RTNL();
 
        if (!rdev->sched_scan_req)
                return -ENOENT;
@@ -522,6 +523,7 @@ static int cmp_bss(struct cfg80211_bss *a,
        }
 }
 
+/* Returned bss is reference counted and must be cleaned up appropriately. */
 struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
                                      struct ieee80211_channel *channel,
                                      const u8 *bssid,
@@ -677,6 +679,7 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *dev,
        return true;
 }
 
+/* Returned bss is reference counted and must be cleaned up appropriately. */
 static struct cfg80211_internal_bss *
 cfg80211_bss_update(struct cfg80211_registered_device *dev,
                    struct cfg80211_internal_bss *tmp)
@@ -865,6 +868,7 @@ cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
        return channel;
 }
 
+/* Returned bss is reference counted and must be cleaned up appropriately. */
 struct cfg80211_bss*
 cfg80211_inform_bss(struct wiphy *wiphy,
                    struct ieee80211_channel *channel,
@@ -922,6 +926,7 @@ cfg80211_inform_bss(struct wiphy *wiphy,
 }
 EXPORT_SYMBOL(cfg80211_inform_bss);
 
+/* Returned bss is reference counted and must be cleaned up appropriately. */
 struct cfg80211_bss *
 cfg80211_inform_bss_frame(struct wiphy *wiphy,
                          struct ieee80211_channel *channel,
@@ -1040,6 +1045,25 @@ void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
 EXPORT_SYMBOL(cfg80211_unlink_bss);
 
 #ifdef CONFIG_CFG80211_WEXT
+static struct cfg80211_registered_device *
+cfg80211_get_dev_from_ifindex(struct net *net, int ifindex)
+{
+       struct cfg80211_registered_device *rdev;
+       struct net_device *dev;
+
+       ASSERT_RTNL();
+
+       dev = dev_get_by_index(net, ifindex);
+       if (!dev)
+               return ERR_PTR(-ENODEV);
+       if (dev->ieee80211_ptr)
+               rdev = wiphy_to_dev(dev->ieee80211_ptr->wiphy);
+       else
+               rdev = ERR_PTR(-ENODEV);
+       dev_put(dev);
+       return rdev;
+}
+
 int cfg80211_wext_siwscan(struct net_device *dev,
                          struct iw_request_info *info,
                          union iwreq_data *wrqu, char *extra)
@@ -1062,7 +1086,6 @@ int cfg80211_wext_siwscan(struct net_device *dev,
        if (IS_ERR(rdev))
                return PTR_ERR(rdev);
 
-       mutex_lock(&rdev->sched_scan_mtx);
        if (rdev->scan_req) {
                err = -EBUSY;
                goto out;
@@ -1169,9 +1192,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,
                dev_hold(dev);
        }
  out:
-       mutex_unlock(&rdev->sched_scan_mtx);
        kfree(creq);
-       cfg80211_unlock_rdev(rdev);
        return err;
 }
 EXPORT_SYMBOL_GPL(cfg80211_wext_siwscan);
@@ -1470,10 +1491,8 @@ int cfg80211_wext_giwscan(struct net_device *dev,
        if (IS_ERR(rdev))
                return PTR_ERR(rdev);
 
-       if (rdev->scan_req) {
-               res = -EAGAIN;
-               goto out;
-       }
+       if (rdev->scan_req)
+               return -EAGAIN;
 
        res = ieee80211_scan_results(rdev, info, extra, data->length);
        data->length = 0;
@@ -1482,8 +1501,6 @@ int cfg80211_wext_giwscan(struct net_device *dev,
                res = 0;
        }
 
- out:
-       cfg80211_unlock_rdev(rdev);
        return res;
 }
 EXPORT_SYMBOL_GPL(cfg80211_wext_giwscan);
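
The new "Returned bss is reference counted and must be cleaned up appropriately" annotations above document the calling convention for cfg80211_get_bss() and the cfg80211_inform_bss*() helpers: a non-NULL result carries a reference the caller must drop with cfg80211_put_bss(). A minimal caller sketch (demo_find_ap and its arguments are illustrative, not from the patch):

/* Illustrative caller, not part of the patch. */
static bool demo_find_ap(struct wiphy *wiphy, struct ieee80211_channel *chan,
			 const u8 *bssid, const u8 *ssid, size_t ssid_len)
{
	struct cfg80211_bss *bss;

	bss = cfg80211_get_bss(wiphy, chan, bssid, ssid, ssid_len,
			       WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
	if (!bss)
		return false;

	/* ... inspect bss->channel, bss->bssid, etc. ... */

	cfg80211_put_bss(wiphy, bss);	/* drop the reference taken above */
	return true;
}
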
index 3ed35c345caed81966eb0ad52b7e1a4b350ec473..1d3cfb1a3f28b3a74a32c1c629ddbcba7fb30def 100644 (file)
@@ -1,5 +1,7 @@
 /*
- * SME code for cfg80211's connect emulation.
+ * SME code for cfg80211
+ * both driver SME event handling and the SME implementation
+ * (for nl80211's connect() and wext)
  *
  * Copyright 2009      Johannes Berg <johannes@sipsolutions.net>
  * Copyright (C) 2009   Intel Corporation. All rights reserved.
 #include "reg.h"
 #include "rdev-ops.h"
 
+/*
+ * Software SME in cfg80211, using auth/assoc/deauth calls to the
+ * driver. This is for implementing nl80211's connect/disconnect
+ * and wireless extensions (if configured).
+ */
+
 struct cfg80211_conn {
        struct cfg80211_connect_params params;
        /* these are sub-states of the _CONNECTING sme_state */
        enum {
-               CFG80211_CONN_IDLE,
                CFG80211_CONN_SCANNING,
                CFG80211_CONN_SCAN_AGAIN,
                CFG80211_CONN_AUTHENTICATE_NEXT,
                CFG80211_CONN_AUTHENTICATING,
                CFG80211_CONN_ASSOCIATE_NEXT,
                CFG80211_CONN_ASSOCIATING,
-               CFG80211_CONN_DEAUTH_ASSOC_FAIL,
+               CFG80211_CONN_DEAUTH,
+               CFG80211_CONN_CONNECTED,
        } state;
        u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN];
        u8 *ie;
@@ -37,45 +45,16 @@ struct cfg80211_conn {
        bool auto_auth, prev_bssid_valid;
 };
 
-static bool cfg80211_is_all_idle(void)
-{
-       struct cfg80211_registered_device *rdev;
-       struct wireless_dev *wdev;
-       bool is_all_idle = true;
-
-       mutex_lock(&cfg80211_mutex);
-
-       /*
-        * All devices must be idle as otherwise if you are actively
-        * scanning some new beacon hints could be learned and would
-        * count as new regulatory hints.
-        */
-       list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
-               cfg80211_lock_rdev(rdev);
-               list_for_each_entry(wdev, &rdev->wdev_list, list) {
-                       wdev_lock(wdev);
-                       if (wdev->sme_state != CFG80211_SME_IDLE)
-                               is_all_idle = false;
-                       wdev_unlock(wdev);
-               }
-               cfg80211_unlock_rdev(rdev);
-       }
-
-       mutex_unlock(&cfg80211_mutex);
-
-       return is_all_idle;
-}
-
-static void disconnect_work(struct work_struct *work)
+static void cfg80211_sme_free(struct wireless_dev *wdev)
 {
-       if (!cfg80211_is_all_idle())
+       if (!wdev->conn)
                return;
 
-       regulatory_hint_disconnect();
+       kfree(wdev->conn->ie);
+       kfree(wdev->conn);
+       wdev->conn = NULL;
 }
 
-static DECLARE_WORK(cfg80211_disconnect_work, disconnect_work);
-
 static int cfg80211_conn_scan(struct wireless_dev *wdev)
 {
        struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -85,7 +64,6 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
        ASSERT_RTNL();
        ASSERT_RDEV_LOCK(rdev);
        ASSERT_WDEV_LOCK(wdev);
-       lockdep_assert_held(&rdev->sched_scan_mtx);
 
        if (rdev->scan_req)
                return -EBUSY;
@@ -171,18 +149,21 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
        params = &wdev->conn->params;
 
        switch (wdev->conn->state) {
+       case CFG80211_CONN_SCANNING:
+               /* didn't find it during scan ... */
+               return -ENOENT;
        case CFG80211_CONN_SCAN_AGAIN:
                return cfg80211_conn_scan(wdev);
        case CFG80211_CONN_AUTHENTICATE_NEXT:
                BUG_ON(!rdev->ops->auth);
                wdev->conn->state = CFG80211_CONN_AUTHENTICATING;
-               return __cfg80211_mlme_auth(rdev, wdev->netdev,
-                                           params->channel, params->auth_type,
-                                           params->bssid,
-                                           params->ssid, params->ssid_len,
-                                           NULL, 0,
-                                           params->key, params->key_len,
-                                           params->key_idx, NULL, 0);
+               return cfg80211_mlme_auth(rdev, wdev->netdev,
+                                         params->channel, params->auth_type,
+                                         params->bssid,
+                                         params->ssid, params->ssid_len,
+                                         NULL, 0,
+                                         params->key, params->key_len,
+                                         params->key_idx, NULL, 0);
        case CFG80211_CONN_ASSOCIATE_NEXT:
                BUG_ON(!rdev->ops->assoc);
                wdev->conn->state = CFG80211_CONN_ASSOCIATING;
@@ -198,21 +179,20 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
                req.vht_capa = params->vht_capa;
                req.vht_capa_mask = params->vht_capa_mask;
 
-               err = __cfg80211_mlme_assoc(rdev, wdev->netdev, params->channel,
-                                           params->bssid, params->ssid,
-                                           params->ssid_len, &req);
+               err = cfg80211_mlme_assoc(rdev, wdev->netdev, params->channel,
+                                         params->bssid, params->ssid,
+                                         params->ssid_len, &req);
                if (err)
-                       __cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid,
-                                              NULL, 0,
-                                              WLAN_REASON_DEAUTH_LEAVING,
-                                              false);
+                       cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid,
+                                            NULL, 0,
+                                            WLAN_REASON_DEAUTH_LEAVING,
+                                            false);
                return err;
-       case CFG80211_CONN_DEAUTH_ASSOC_FAIL:
-               __cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid,
-                                      NULL, 0,
-                                      WLAN_REASON_DEAUTH_LEAVING, false);
-               /* return an error so that we call __cfg80211_connect_result() */
-               return -EINVAL;
+       case CFG80211_CONN_DEAUTH:
+               cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid,
+                                    NULL, 0,
+                                    WLAN_REASON_DEAUTH_LEAVING, false);
+               return 0;
        default:
                return 0;
        }
@@ -226,9 +206,6 @@ void cfg80211_conn_work(struct work_struct *work)
        u8 bssid_buf[ETH_ALEN], *bssid = NULL;
 
        rtnl_lock();
-       cfg80211_lock_rdev(rdev);
-       mutex_lock(&rdev->devlist_mtx);
-       mutex_lock(&rdev->sched_scan_mtx);
 
        list_for_each_entry(wdev, &rdev->wdev_list, list) {
                if (!wdev->netdev)
@@ -239,7 +216,8 @@ void cfg80211_conn_work(struct work_struct *work)
                        wdev_unlock(wdev);
                        continue;
                }
-               if (wdev->sme_state != CFG80211_SME_CONNECTING || !wdev->conn) {
+               if (!wdev->conn ||
+                   wdev->conn->state == CFG80211_CONN_CONNECTED) {
                        wdev_unlock(wdev);
                        continue;
                }
@@ -247,21 +225,21 @@ void cfg80211_conn_work(struct work_struct *work)
                        memcpy(bssid_buf, wdev->conn->params.bssid, ETH_ALEN);
                        bssid = bssid_buf;
                }
-               if (cfg80211_conn_do_work(wdev))
+               if (cfg80211_conn_do_work(wdev)) {
                        __cfg80211_connect_result(
                                        wdev->netdev, bssid,
                                        NULL, 0, NULL, 0,
                                        WLAN_STATUS_UNSPECIFIED_FAILURE,
                                        false, NULL);
+                       cfg80211_sme_free(wdev);
+               }
                wdev_unlock(wdev);
        }
 
-       mutex_unlock(&rdev->sched_scan_mtx);
-       mutex_unlock(&rdev->devlist_mtx);
-       cfg80211_unlock_rdev(rdev);
        rtnl_unlock();
 }
 
+/* Returned bss is reference counted and must be cleaned up appropriately. */
 static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev)
 {
        struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -299,9 +277,6 @@ static void __cfg80211_sme_scan_done(struct net_device *dev)
 
        ASSERT_WDEV_LOCK(wdev);
 
-       if (wdev->sme_state != CFG80211_SME_CONNECTING)
-               return;
-
        if (!wdev->conn)
                return;
 
@@ -310,20 +285,10 @@ static void __cfg80211_sme_scan_done(struct net_device *dev)
                return;
 
        bss = cfg80211_get_conn_bss(wdev);
-       if (bss) {
+       if (bss)
                cfg80211_put_bss(&rdev->wiphy, bss);
-       } else {
-               /* not found */
-               if (wdev->conn->state == CFG80211_CONN_SCAN_AGAIN)
-                       schedule_work(&rdev->conn_work);
-               else
-                       __cfg80211_connect_result(
-                                       wdev->netdev,
-                                       wdev->conn->params.bssid,
-                                       NULL, 0, NULL, 0,
-                                       WLAN_STATUS_UNSPECIFIED_FAILURE,
-                                       false, NULL);
-       }
+       else
+               schedule_work(&rdev->conn_work);
 }
 
 void cfg80211_sme_scan_done(struct net_device *dev)
@@ -335,10 +300,8 @@ void cfg80211_sme_scan_done(struct net_device *dev)
        wdev_unlock(wdev);
 }
 
-void cfg80211_sme_rx_auth(struct net_device *dev,
-                         const u8 *buf, size_t len)
+void cfg80211_sme_rx_auth(struct wireless_dev *wdev, const u8 *buf, size_t len)
 {
-       struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct wiphy *wiphy = wdev->wiphy;
        struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
        struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
@@ -346,11 +309,7 @@ void cfg80211_sme_rx_auth(struct net_device *dev,
 
        ASSERT_WDEV_LOCK(wdev);
 
-       /* should only RX auth frames when connecting */
-       if (wdev->sme_state != CFG80211_SME_CONNECTING)
-               return;
-
-       if (WARN_ON(!wdev->conn))
+       if (!wdev->conn || wdev->conn->state == CFG80211_CONN_CONNECTED)
                return;
 
        if (status_code == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG &&
@@ -379,46 +338,227 @@ void cfg80211_sme_rx_auth(struct net_device *dev,
                wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT;
                schedule_work(&rdev->conn_work);
        } else if (status_code != WLAN_STATUS_SUCCESS) {
-               __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, NULL, 0,
+               __cfg80211_connect_result(wdev->netdev, mgmt->bssid,
+                                         NULL, 0, NULL, 0,
                                          status_code, false, NULL);
-       } else if (wdev->sme_state == CFG80211_SME_CONNECTING &&
-                wdev->conn->state == CFG80211_CONN_AUTHENTICATING) {
+       } else if (wdev->conn->state == CFG80211_CONN_AUTHENTICATING) {
                wdev->conn->state = CFG80211_CONN_ASSOCIATE_NEXT;
                schedule_work(&rdev->conn_work);
        }
 }
 
-bool cfg80211_sme_failed_reassoc(struct wireless_dev *wdev)
+bool cfg80211_sme_rx_assoc_resp(struct wireless_dev *wdev, u16 status)
 {
-       struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
 
-       if (WARN_ON(!wdev->conn))
+       if (!wdev->conn)
                return false;
 
-       if (!wdev->conn->prev_bssid_valid)
+       if (status == WLAN_STATUS_SUCCESS) {
+               wdev->conn->state = CFG80211_CONN_CONNECTED;
                return false;
+       }
 
-       /*
-        * Some stupid APs don't accept reassoc, so we
-        * need to fall back to trying regular assoc.
-        */
-       wdev->conn->prev_bssid_valid = false;
-       wdev->conn->state = CFG80211_CONN_ASSOCIATE_NEXT;
+       if (wdev->conn->prev_bssid_valid) {
+               /*
+                * Some stupid APs don't accept reassoc, so we
+                * need to fall back to trying regular assoc;
+                * return true so no event is sent to userspace.
+                */
+               wdev->conn->prev_bssid_valid = false;
+               wdev->conn->state = CFG80211_CONN_ASSOCIATE_NEXT;
+               schedule_work(&rdev->conn_work);
+               return true;
+       }
+
+       wdev->conn->state = CFG80211_CONN_DEAUTH;
        schedule_work(&rdev->conn_work);
+       return false;
+}
 
-       return true;
+void cfg80211_sme_deauth(struct wireless_dev *wdev)
+{
+       cfg80211_sme_free(wdev);
 }
 
-void cfg80211_sme_failed_assoc(struct wireless_dev *wdev)
+void cfg80211_sme_auth_timeout(struct wireless_dev *wdev)
 {
-       struct wiphy *wiphy = wdev->wiphy;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       cfg80211_sme_free(wdev);
+}
 
-       wdev->conn->state = CFG80211_CONN_DEAUTH_ASSOC_FAIL;
+void cfg80211_sme_disassoc(struct wireless_dev *wdev)
+{
+       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+
+       if (!wdev->conn)
+               return;
+
+       wdev->conn->state = CFG80211_CONN_DEAUTH;
        schedule_work(&rdev->conn_work);
 }
 
+void cfg80211_sme_assoc_timeout(struct wireless_dev *wdev)
+{
+       cfg80211_sme_disassoc(wdev);
+}
+
+static int cfg80211_sme_connect(struct wireless_dev *wdev,
+                               struct cfg80211_connect_params *connect,
+                               const u8 *prev_bssid)
+{
+       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       struct cfg80211_bss *bss;
+       int err;
+
+       if (!rdev->ops->auth || !rdev->ops->assoc)
+               return -EOPNOTSUPP;
+
+       if (wdev->current_bss)
+               return -EALREADY;
+
+       if (WARN_ON(wdev->conn))
+               return -EINPROGRESS;
+
+       wdev->conn = kzalloc(sizeof(*wdev->conn), GFP_KERNEL);
+       if (!wdev->conn)
+               return -ENOMEM;
+
+       /*
+        * Copy all parameters, and treat the IEs, BSSID and SSID explicitly.
+        */
+       memcpy(&wdev->conn->params, connect, sizeof(*connect));
+       if (connect->bssid) {
+               wdev->conn->params.bssid = wdev->conn->bssid;
+               memcpy(wdev->conn->bssid, connect->bssid, ETH_ALEN);
+       }
+
+       if (connect->ie) {
+               wdev->conn->ie = kmemdup(connect->ie, connect->ie_len,
+                                       GFP_KERNEL);
+               wdev->conn->params.ie = wdev->conn->ie;
+               if (!wdev->conn->ie) {
+                       kfree(wdev->conn);
+                       wdev->conn = NULL;
+                       return -ENOMEM;
+               }
+       }
+
+       if (connect->auth_type == NL80211_AUTHTYPE_AUTOMATIC) {
+               wdev->conn->auto_auth = true;
+               /* start with open system ... should mostly work */
+               wdev->conn->params.auth_type =
+                       NL80211_AUTHTYPE_OPEN_SYSTEM;
+       } else {
+               wdev->conn->auto_auth = false;
+       }
+
+       wdev->conn->params.ssid = wdev->ssid;
+       wdev->conn->params.ssid_len = connect->ssid_len;
+
+       /* see if we have the bss already */
+       bss = cfg80211_get_conn_bss(wdev);
+
+       if (prev_bssid) {
+               memcpy(wdev->conn->prev_bssid, prev_bssid, ETH_ALEN);
+               wdev->conn->prev_bssid_valid = true;
+       }
+
+       /* we're good if we have a matching bss struct */
+       if (bss) {
+               wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT;
+               err = cfg80211_conn_do_work(wdev);
+               cfg80211_put_bss(wdev->wiphy, bss);
+       } else {
+               /* otherwise we'll need to scan for the AP first */
+               err = cfg80211_conn_scan(wdev);
+
+               /*
+                * If we can't scan right now, then we need to scan again
+                * after the current scan finished, since the parameters
+                * changed (unless we find a good AP anyway).
+                */
+               if (err == -EBUSY) {
+                       err = 0;
+                       wdev->conn->state = CFG80211_CONN_SCAN_AGAIN;
+               }
+       }
+
+       if (err)
+               cfg80211_sme_free(wdev);
+
+       return err;
+}
+
+static int cfg80211_sme_disconnect(struct wireless_dev *wdev, u16 reason)
+{
+       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+       int err;
+
+       if (!wdev->conn)
+               return 0;
+
+       if (!rdev->ops->deauth)
+               return -EOPNOTSUPP;
+
+       if (wdev->conn->state == CFG80211_CONN_SCANNING ||
+           wdev->conn->state == CFG80211_CONN_SCAN_AGAIN) {
+               err = 0;
+               goto out;
+       }
+
+       /* wdev->conn->params.bssid must be set if > SCANNING */
+       err = cfg80211_mlme_deauth(rdev, wdev->netdev,
+                                  wdev->conn->params.bssid,
+                                  NULL, 0, reason, false);
+ out:
+       cfg80211_sme_free(wdev);
+       return err;
+}
+
+/*
+ * code shared for in-device and software SME
+ */
+
+static bool cfg80211_is_all_idle(void)
+{
+       struct cfg80211_registered_device *rdev;
+       struct wireless_dev *wdev;
+       bool is_all_idle = true;
+
+       /*
+        * All devices must be idle as otherwise, if you are actively
+        * scanning, some new beacon hints could be learned and would
+        * count as new regulatory hints.
+        */
+       list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+               list_for_each_entry(wdev, &rdev->wdev_list, list) {
+                       wdev_lock(wdev);
+                       if (wdev->conn || wdev->current_bss)
+                               is_all_idle = false;
+                       wdev_unlock(wdev);
+               }
+       }
+
+       return is_all_idle;
+}
+
+static void disconnect_work(struct work_struct *work)
+{
+       rtnl_lock();
+       if (cfg80211_is_all_idle())
+               regulatory_hint_disconnect();
+       rtnl_unlock();
+}
+
+static DECLARE_WORK(cfg80211_disconnect_work, disconnect_work);
+
+
+/*
+ * API calls for drivers implementing connect/disconnect and
+ * SME event handling
+ */
+
+/* This method must consume bss one way or another */
 void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
                               const u8 *req_ie, size_t req_ie_len,
                               const u8 *resp_ie, size_t resp_ie_len,
@@ -434,11 +574,10 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
        ASSERT_WDEV_LOCK(wdev);
 
        if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION &&
-                   wdev->iftype != NL80211_IFTYPE_P2P_CLIENT))
-               return;
-
-       if (wdev->sme_state != CFG80211_SME_CONNECTING)
+                   wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) {
+               cfg80211_put_bss(wdev->wiphy, bss);
                return;
+       }
 
        nl80211_send_connect_result(wiphy_to_dev(wdev->wiphy), dev,
                                    bssid, req_ie, req_ie_len,
@@ -476,38 +615,30 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
                wdev->current_bss = NULL;
        }
 
-       if (wdev->conn)
-               wdev->conn->state = CFG80211_CONN_IDLE;
-
        if (status != WLAN_STATUS_SUCCESS) {
-               wdev->sme_state = CFG80211_SME_IDLE;
-               if (wdev->conn)
-                       kfree(wdev->conn->ie);
-               kfree(wdev->conn);
-               wdev->conn = NULL;
                kfree(wdev->connect_keys);
                wdev->connect_keys = NULL;
                wdev->ssid_len = 0;
-               cfg80211_put_bss(wdev->wiphy, bss);
+               if (bss) {
+                       cfg80211_unhold_bss(bss_from_pub(bss));
+                       cfg80211_put_bss(wdev->wiphy, bss);
+               }
                return;
        }
 
-       if (!bss)
-               bss = cfg80211_get_bss(wdev->wiphy,
-                                      wdev->conn ? wdev->conn->params.channel :
-                                      NULL,
-                                      bssid,
+       if (!bss) {
+               WARN_ON_ONCE(!wiphy_to_dev(wdev->wiphy)->ops->connect);
+               bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
                                       wdev->ssid, wdev->ssid_len,
                                       WLAN_CAPABILITY_ESS,
                                       WLAN_CAPABILITY_ESS);
+               if (WARN_ON(!bss))
+                       return;
+               cfg80211_hold_bss(bss_from_pub(bss));
+       }
 
-       if (WARN_ON(!bss))
-               return;
-
-       cfg80211_hold_bss(bss_from_pub(bss));
        wdev->current_bss = bss_from_pub(bss);
 
-       wdev->sme_state = CFG80211_SME_CONNECTED;
        cfg80211_upload_connect_keys(wdev);
 
        rcu_read_lock();
@@ -543,8 +674,6 @@ void cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
        struct cfg80211_event *ev;
        unsigned long flags;
 
-       CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTING);
-
        ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp);
        if (!ev)
                return;
@@ -571,6 +700,7 @@ void cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
 }
 EXPORT_SYMBOL(cfg80211_connect_result);
 
+/* Consumes bss object one way or another */
 void __cfg80211_roamed(struct wireless_dev *wdev,
                       struct cfg80211_bss *bss,
                       const u8 *req_ie, size_t req_ie_len,
@@ -585,14 +715,9 @@ void __cfg80211_roamed(struct wireless_dev *wdev,
                    wdev->iftype != NL80211_IFTYPE_P2P_CLIENT))
                goto out;
 
-       if (wdev->sme_state != CFG80211_SME_CONNECTED)
+       if (WARN_ON(!wdev->current_bss))
                goto out;
 
-       /* internal error -- how did we get to CONNECTED w/o BSS? */
-       if (WARN_ON(!wdev->current_bss)) {
-               goto out;
-       }
-
        cfg80211_unhold_bss(wdev->current_bss);
        cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
        wdev->current_bss = NULL;
@@ -641,8 +766,6 @@ void cfg80211_roamed(struct net_device *dev,
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct cfg80211_bss *bss;
 
-       CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTED);
-
        bss = cfg80211_get_bss(wdev->wiphy, channel, bssid, wdev->ssid,
                               wdev->ssid_len, WLAN_CAPABILITY_ESS,
                               WLAN_CAPABILITY_ESS);
@@ -654,6 +777,7 @@ void cfg80211_roamed(struct net_device *dev,
 }
 EXPORT_SYMBOL(cfg80211_roamed);
 
+/* Consumes bss object one way or another */
 void cfg80211_roamed_bss(struct net_device *dev,
                         struct cfg80211_bss *bss, const u8 *req_ie,
                         size_t req_ie_len, const u8 *resp_ie,
@@ -664,8 +788,6 @@ void cfg80211_roamed_bss(struct net_device *dev,
        struct cfg80211_event *ev;
        unsigned long flags;
 
-       CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTED);
-
        if (WARN_ON(!bss))
                return;
 
@@ -707,25 +829,14 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
                    wdev->iftype != NL80211_IFTYPE_P2P_CLIENT))
                return;
 
-       if (wdev->sme_state != CFG80211_SME_CONNECTED)
-               return;
-
        if (wdev->current_bss) {
                cfg80211_unhold_bss(wdev->current_bss);
                cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
        }
 
        wdev->current_bss = NULL;
-       wdev->sme_state = CFG80211_SME_IDLE;
        wdev->ssid_len = 0;
 
-       if (wdev->conn) {
-               kfree(wdev->conn->ie);
-               wdev->conn->ie = NULL;
-               kfree(wdev->conn);
-               wdev->conn = NULL;
-       }
-
        nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap);
 
        /*
@@ -754,8 +865,6 @@ void cfg80211_disconnected(struct net_device *dev, u16 reason,
        struct cfg80211_event *ev;
        unsigned long flags;
 
-       CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTED);
-
        ev = kzalloc(sizeof(*ev) + ie_len, gfp);
        if (!ev)
                return;
@@ -773,21 +882,20 @@ void cfg80211_disconnected(struct net_device *dev, u16 reason,
 }
 EXPORT_SYMBOL(cfg80211_disconnected);
 
-int __cfg80211_connect(struct cfg80211_registered_device *rdev,
-                      struct net_device *dev,
-                      struct cfg80211_connect_params *connect,
-                      struct cfg80211_cached_keys *connkeys,
-                      const u8 *prev_bssid)
+/*
+ * API calls for nl80211/wext compatibility code
+ */
+int cfg80211_connect(struct cfg80211_registered_device *rdev,
+                    struct net_device *dev,
+                    struct cfg80211_connect_params *connect,
+                    struct cfg80211_cached_keys *connkeys,
+                    const u8 *prev_bssid)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_bss *bss = NULL;
        int err;
 
        ASSERT_WDEV_LOCK(wdev);
 
-       if (wdev->sme_state != CFG80211_SME_IDLE)
-               return -EALREADY;
-
        if (WARN_ON(wdev->connect_keys)) {
                kfree(wdev->connect_keys);
                wdev->connect_keys = NULL;
@@ -823,219 +931,43 @@ int __cfg80211_connect(struct cfg80211_registered_device *rdev,
                }
        }
 
-       if (!rdev->ops->connect) {
-               if (!rdev->ops->auth || !rdev->ops->assoc)
-                       return -EOPNOTSUPP;
-
-               if (WARN_ON(wdev->conn))
-                       return -EINPROGRESS;
-
-               wdev->conn = kzalloc(sizeof(*wdev->conn), GFP_KERNEL);
-               if (!wdev->conn)
-                       return -ENOMEM;
-
-               /*
-                * Copy all parameters, and treat explicitly IEs, BSSID, SSID.
-                */
-               memcpy(&wdev->conn->params, connect, sizeof(*connect));
-               if (connect->bssid) {
-                       wdev->conn->params.bssid = wdev->conn->bssid;
-                       memcpy(wdev->conn->bssid, connect->bssid, ETH_ALEN);
-               }
-
-               if (connect->ie) {
-                       wdev->conn->ie = kmemdup(connect->ie, connect->ie_len,
-                                               GFP_KERNEL);
-                       wdev->conn->params.ie = wdev->conn->ie;
-                       if (!wdev->conn->ie) {
-                               kfree(wdev->conn);
-                               wdev->conn = NULL;
-                               return -ENOMEM;
-                       }
-               }
-
-               if (connect->auth_type == NL80211_AUTHTYPE_AUTOMATIC) {
-                       wdev->conn->auto_auth = true;
-                       /* start with open system ... should mostly work */
-                       wdev->conn->params.auth_type =
-                               NL80211_AUTHTYPE_OPEN_SYSTEM;
-               } else {
-                       wdev->conn->auto_auth = false;
-               }
-
-               memcpy(wdev->ssid, connect->ssid, connect->ssid_len);
-               wdev->ssid_len = connect->ssid_len;
-               wdev->conn->params.ssid = wdev->ssid;
-               wdev->conn->params.ssid_len = connect->ssid_len;
-
-               /* see if we have the bss already */
-               bss = cfg80211_get_conn_bss(wdev);
-
-               wdev->sme_state = CFG80211_SME_CONNECTING;
-               wdev->connect_keys = connkeys;
-
-               if (prev_bssid) {
-                       memcpy(wdev->conn->prev_bssid, prev_bssid, ETH_ALEN);
-                       wdev->conn->prev_bssid_valid = true;
-               }
-
-               /* we're good if we have a matching bss struct */
-               if (bss) {
-                       wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT;
-                       err = cfg80211_conn_do_work(wdev);
-                       cfg80211_put_bss(wdev->wiphy, bss);
-               } else {
-                       /* otherwise we'll need to scan for the AP first */
-                       err = cfg80211_conn_scan(wdev);
-                       /*
-                        * If we can't scan right now, then we need to scan again
-                        * after the current scan finished, since the parameters
-                        * changed (unless we find a good AP anyway).
-                        */
-                       if (err == -EBUSY) {
-                               err = 0;
-                               wdev->conn->state = CFG80211_CONN_SCAN_AGAIN;
-                       }
-               }
-               if (err) {
-                       kfree(wdev->conn->ie);
-                       kfree(wdev->conn);
-                       wdev->conn = NULL;
-                       wdev->sme_state = CFG80211_SME_IDLE;
-                       wdev->connect_keys = NULL;
-                       wdev->ssid_len = 0;
-               }
+       wdev->connect_keys = connkeys;
+       memcpy(wdev->ssid, connect->ssid, connect->ssid_len);
+       wdev->ssid_len = connect->ssid_len;
 
-               return err;
-       } else {
-               wdev->sme_state = CFG80211_SME_CONNECTING;
-               wdev->connect_keys = connkeys;
+       if (!rdev->ops->connect)
+               err = cfg80211_sme_connect(wdev, connect, prev_bssid);
+       else
                err = rdev_connect(rdev, dev, connect);
-               if (err) {
-                       wdev->connect_keys = NULL;
-                       wdev->sme_state = CFG80211_SME_IDLE;
-                       return err;
-               }
-
-               memcpy(wdev->ssid, connect->ssid, connect->ssid_len);
-               wdev->ssid_len = connect->ssid_len;
 
-               return 0;
+       if (err) {
+               wdev->connect_keys = NULL;
+               wdev->ssid_len = 0;
+               return err;
        }
-}
-
-int cfg80211_connect(struct cfg80211_registered_device *rdev,
-                    struct net_device *dev,
-                    struct cfg80211_connect_params *connect,
-                    struct cfg80211_cached_keys *connkeys)
-{
-       int err;
-
-       mutex_lock(&rdev->devlist_mtx);
-       /* might request scan - scan_mtx -> wdev_mtx dependency */
-       mutex_lock(&rdev->sched_scan_mtx);
-       wdev_lock(dev->ieee80211_ptr);
-       err = __cfg80211_connect(rdev, dev, connect, connkeys, NULL);
-       wdev_unlock(dev->ieee80211_ptr);
-       mutex_unlock(&rdev->sched_scan_mtx);
-       mutex_unlock(&rdev->devlist_mtx);
 
-       return err;
+       return 0;
 }
 
-int __cfg80211_disconnect(struct cfg80211_registered_device *rdev,
-                         struct net_device *dev, u16 reason, bool wextev)
+int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
+                       struct net_device *dev, u16 reason, bool wextev)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        int err;
 
        ASSERT_WDEV_LOCK(wdev);
 
-       if (wdev->sme_state == CFG80211_SME_IDLE)
-               return -EINVAL;
-
        kfree(wdev->connect_keys);
        wdev->connect_keys = NULL;
 
-       if (!rdev->ops->disconnect) {
-               if (!rdev->ops->deauth)
-                       return -EOPNOTSUPP;
-
-               /* was it connected by userspace SME? */
-               if (!wdev->conn) {
-                       cfg80211_mlme_down(rdev, dev);
-                       goto disconnect;
-               }
-
-               if (wdev->sme_state == CFG80211_SME_CONNECTING &&
-                   (wdev->conn->state == CFG80211_CONN_SCANNING ||
-                    wdev->conn->state == CFG80211_CONN_SCAN_AGAIN)) {
-                       wdev->sme_state = CFG80211_SME_IDLE;
-                       kfree(wdev->conn->ie);
-                       kfree(wdev->conn);
-                       wdev->conn = NULL;
-                       wdev->ssid_len = 0;
-                       return 0;
-               }
-
-               /* wdev->conn->params.bssid must be set if > SCANNING */
-               err = __cfg80211_mlme_deauth(rdev, dev,
-                                            wdev->conn->params.bssid,
-                                            NULL, 0, reason, false);
-               if (err)
-                       return err;
+       if (wdev->conn) {
+               err = cfg80211_sme_disconnect(wdev, reason);
+       } else if (!rdev->ops->disconnect) {
+               cfg80211_mlme_down(rdev, dev);
+               err = 0;
        } else {
                err = rdev_disconnect(rdev, dev, reason);
-               if (err)
-                       return err;
        }
 
- disconnect:
-       if (wdev->sme_state == CFG80211_SME_CONNECTED)
-               __cfg80211_disconnected(dev, NULL, 0, 0, false);
-       else if (wdev->sme_state == CFG80211_SME_CONNECTING)
-               __cfg80211_connect_result(dev, NULL, NULL, 0, NULL, 0,
-                                         WLAN_STATUS_UNSPECIFIED_FAILURE,
-                                         wextev, NULL);
-
-       return 0;
-}
-
-int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
-                       struct net_device *dev,
-                       u16 reason, bool wextev)
-{
-       int err;
-
-       wdev_lock(dev->ieee80211_ptr);
-       err = __cfg80211_disconnect(rdev, dev, reason, wextev);
-       wdev_unlock(dev->ieee80211_ptr);
-
        return err;
 }
-
-void cfg80211_sme_disassoc(struct net_device *dev,
-                          struct cfg80211_internal_bss *bss)
-{
-       struct wireless_dev *wdev = dev->ieee80211_ptr;
-       struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
-       u8 bssid[ETH_ALEN];
-
-       ASSERT_WDEV_LOCK(wdev);
-
-       if (!wdev->conn)
-               return;
-
-       if (wdev->conn->state == CFG80211_CONN_IDLE)
-               return;
-
-       /*
-        * Ok, so the association was made by this SME -- we don't
-        * want it any more so deauthenticate too.
-        */
-
-       memcpy(bssid, bss->pub.bssid, ETH_ALEN);
-
-       __cfg80211_mlme_deauth(rdev, dev, bssid, NULL, 0,
-                              WLAN_REASON_DEAUTH_LEAVING, false);
-}
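The sme.c rework above collapses the __cfg80211_connect()/__cfg80211_disconnect() wrappers into single entry points that expect the caller to hold the wdev lock (note the ASSERT_WDEV_LOCK() left in place). A minimal, illustrative caller based on the wext call sites later in this diff — not part of the patch itself — would look like:

/* Hypothetical caller: the wdev lock is now taken by the caller;
 * prev_bssid may be NULL when there is no previous association.
 */
static int example_connect(struct cfg80211_registered_device *rdev,
                           struct net_device *dev,
                           struct cfg80211_connect_params *params,
                           struct cfg80211_cached_keys *connkeys)
{
        int err;

        wdev_lock(dev->ieee80211_ptr);
        err = cfg80211_connect(rdev, dev, params, connkeys, NULL);
        wdev_unlock(dev->ieee80211_ptr);

        return err;
}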
index 8f28b9f798d88a48281a7b633406b99bad4b5e98..a23253e0635872937f1dce0273dae4b959e87c3b 100644 (file)
@@ -83,6 +83,7 @@ static int wiphy_uevent(struct device *dev, struct kobj_uevent_env *env)
        return 0;
 }
 
+#ifdef CONFIG_PM
 static void cfg80211_leave_all(struct cfg80211_registered_device *rdev)
 {
        struct wireless_dev *wdev;
@@ -100,10 +101,10 @@ static int wiphy_suspend(struct device *dev, pm_message_t state)
 
        rtnl_lock();
        if (rdev->wiphy.registered) {
-               if (!rdev->wowlan)
+               if (!rdev->wiphy.wowlan_config)
                        cfg80211_leave_all(rdev);
                if (rdev->ops->suspend)
-                       ret = rdev_suspend(rdev, rdev->wowlan);
+                       ret = rdev_suspend(rdev, rdev->wiphy.wowlan_config);
                if (ret == 1) {
                        /* Driver refuses to configure wowlan */
                        cfg80211_leave_all(rdev);
@@ -132,6 +133,7 @@ static int wiphy_resume(struct device *dev)
 
        return ret;
 }
+#endif
 
 static const void *wiphy_namespace(struct device *d)
 {
@@ -146,8 +148,10 @@ struct class ieee80211_class = {
        .dev_release = wiphy_dev_release,
        .dev_attrs = ieee80211_dev_attrs,
        .dev_uevent = wiphy_uevent,
+#ifdef CONFIG_PM
        .suspend = wiphy_suspend,
        .resume = wiphy_resume,
+#endif
        .ns_type = &net_ns_type_operations,
        .namespace = wiphy_namespace,
 };
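The sysfs.c hunks above wrap the suspend/resume path in CONFIG_PM, so kernels built without power management neither compile the helpers nor register the callbacks. A minimal sketch of the same pattern (illustrative names, not from the patch):

#ifdef CONFIG_PM
static int example_suspend(struct device *dev, pm_message_t state)
{
        /* quiesce the device here */
        return 0;
}

static int example_resume(struct device *dev)
{
        return 0;
}
#endif

static struct class example_class = {
        .name    = "example",
#ifdef CONFIG_PM
        .suspend = example_suspend,
        .resume  = example_resume,
#endif
};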
index 5755bc14abbd8220ff39a5f271043b5d4165d947..e1534baf2ebbe5d0fdc41d4e2d6ceb42fd9630f3 100644 (file)
@@ -1911,24 +1911,46 @@ TRACE_EVENT(cfg80211_send_rx_assoc,
                  NETDEV_PR_ARG, MAC_PR_ARG(bssid), CHAN_PR_ARG)
 );
 
-DEFINE_EVENT(netdev_evt_only, __cfg80211_send_deauth,
-       TP_PROTO(struct net_device *netdev),
-       TP_ARGS(netdev)
+DECLARE_EVENT_CLASS(netdev_frame_event,
+       TP_PROTO(struct net_device *netdev, const u8 *buf, int len),
+       TP_ARGS(netdev, buf, len),
+       TP_STRUCT__entry(
+               NETDEV_ENTRY
+               __dynamic_array(u8, frame, len)
+       ),
+       TP_fast_assign(
+               NETDEV_ASSIGN;
+               memcpy(__get_dynamic_array(frame), buf, len);
+       ),
+       TP_printk(NETDEV_PR_FMT ", ftype:0x%.2x",
+                 NETDEV_PR_ARG,
+                 le16_to_cpup((__le16 *)__get_dynamic_array(frame)))
 );
 
-DEFINE_EVENT(netdev_evt_only, __cfg80211_send_disassoc,
-       TP_PROTO(struct net_device *netdev),
-       TP_ARGS(netdev)
+DEFINE_EVENT(netdev_frame_event, cfg80211_rx_unprot_mlme_mgmt,
+       TP_PROTO(struct net_device *netdev, const u8 *buf, int len),
+       TP_ARGS(netdev, buf, len)
 );
 
-DEFINE_EVENT(netdev_evt_only, cfg80211_send_unprot_deauth,
-       TP_PROTO(struct net_device *netdev),
-       TP_ARGS(netdev)
+DEFINE_EVENT(netdev_frame_event, cfg80211_rx_mlme_mgmt,
+       TP_PROTO(struct net_device *netdev, const u8 *buf, int len),
+       TP_ARGS(netdev, buf, len)
 );
 
-DEFINE_EVENT(netdev_evt_only, cfg80211_send_unprot_disassoc,
-       TP_PROTO(struct net_device *netdev),
-       TP_ARGS(netdev)
+TRACE_EVENT(cfg80211_tx_mlme_mgmt,
+       TP_PROTO(struct net_device *netdev, const u8 *buf, int len),
+       TP_ARGS(netdev, buf, len),
+       TP_STRUCT__entry(
+               NETDEV_ENTRY
+               __dynamic_array(u8, frame, len)
+       ),
+       TP_fast_assign(
+               NETDEV_ASSIGN;
+               memcpy(__get_dynamic_array(frame), buf, len);
+       ),
+       TP_printk(NETDEV_PR_FMT ", ftype:0x%.2x",
+                 NETDEV_PR_ARG,
+                 le16_to_cpup((__le16 *)__get_dynamic_array(frame)))
 );
 
 DECLARE_EVENT_CLASS(netdev_mac_evt,
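The trace.h changes replace the netdev-only deauth/disassoc events with a netdev_frame_event class that snapshots the whole management frame via __dynamic_array(). A usage sketch (the trace_*() wrapper is generated by DEFINE_EVENT; the surrounding function is hypothetical):

static void example_rx_mgmt(struct net_device *dev, const u8 *buf, int len)
{
        /* records the netdev plus a copy of the frame; the TP_printk
         * format above decodes the first two bytes as the frame control
         * field ("ftype") */
        trace_cfg80211_rx_mlme_mgmt(dev, buf, len);
}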
index f5ad4d94ba88692e2b400e4a404b26a11318815e..74458b7f61eb8bcd00eadd0cd4c24a380c589fb1 100644 (file)
@@ -33,6 +33,29 @@ ieee80211_get_response_rate(struct ieee80211_supported_band *sband,
 }
 EXPORT_SYMBOL(ieee80211_get_response_rate);
 
+u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband)
+{
+       struct ieee80211_rate *bitrates;
+       u32 mandatory_rates = 0;
+       enum ieee80211_rate_flags mandatory_flag;
+       int i;
+
+       if (WARN_ON(!sband))
+               return 1;
+
+       if (sband->band == IEEE80211_BAND_2GHZ)
+               mandatory_flag = IEEE80211_RATE_MANDATORY_B;
+       else
+               mandatory_flag = IEEE80211_RATE_MANDATORY_A;
+
+       bitrates = sband->bitrates;
+       for (i = 0; i < sband->n_bitrates; i++)
+               if (bitrates[i].flags & mandatory_flag)
+                       mandatory_rates |= BIT(i);
+       return mandatory_rates;
+}
+EXPORT_SYMBOL(ieee80211_mandatory_rates);
+
 int ieee80211_channel_to_frequency(int chan, enum ieee80211_band band)
 {
        /* see 802.11 17.3.8.3.2 and Annex J
@@ -785,12 +808,8 @@ void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev)
        ASSERT_RTNL();
        ASSERT_RDEV_LOCK(rdev);
 
-       mutex_lock(&rdev->devlist_mtx);
-
        list_for_each_entry(wdev, &rdev->wdev_list, list)
                cfg80211_process_wdev_events(wdev);
-
-       mutex_unlock(&rdev->devlist_mtx);
 }
 
 int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
@@ -822,10 +841,8 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
                return -EBUSY;
 
        if (ntype != otype && netif_running(dev)) {
-               mutex_lock(&rdev->devlist_mtx);
                err = cfg80211_can_change_interface(rdev, dev->ieee80211_ptr,
                                                    ntype);
-               mutex_unlock(&rdev->devlist_mtx);
                if (err)
                        return err;
 
@@ -841,8 +858,10 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
                        break;
                case NL80211_IFTYPE_STATION:
                case NL80211_IFTYPE_P2P_CLIENT:
+                       wdev_lock(dev->ieee80211_ptr);
                        cfg80211_disconnect(rdev, dev,
                                            WLAN_REASON_DEAUTH_LEAVING, true);
+                       wdev_unlock(dev->ieee80211_ptr);
                        break;
                case NL80211_IFTYPE_MESH_POINT:
                        /* mesh should be handled? */
@@ -1169,6 +1188,9 @@ bool ieee80211_operating_class_to_band(u8 operating_class,
        case 84:
                *band = IEEE80211_BAND_2GHZ;
                return true;
+       case 180:
+               *band = IEEE80211_BAND_60GHZ;
+               return true;
        }
 
        return false;
@@ -1184,8 +1206,6 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
        if (!beacon_int)
                return -EINVAL;
 
-       mutex_lock(&rdev->devlist_mtx);
-
        list_for_each_entry(wdev, &rdev->wdev_list, list) {
                if (!wdev->beacon_interval)
                        continue;
@@ -1195,8 +1215,6 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
                }
        }
 
-       mutex_unlock(&rdev->devlist_mtx);
-
        return res;
 }
 
@@ -1220,7 +1238,6 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
        int i, j;
 
        ASSERT_RTNL();
-       lockdep_assert_held(&rdev->devlist_mtx);
 
        if (WARN_ON(hweight32(radar_detect) > 1))
                return -EINVAL;
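Earlier in this file the patch adds ieee80211_mandatory_rates(), which returns a bitmap in which bit i marks sband->bitrates[i] as mandatory for the band. An illustrative consumer (hypothetical helper, not part of the patch; bitrate values are in 100 kbps units):

static void example_print_mandatory(struct ieee80211_supported_band *sband)
{
        u32 rates = ieee80211_mandatory_rates(sband);
        int i;

        for (i = 0; i < sband->n_bitrates; i++)
                if (rates & BIT(i))
                        pr_info("mandatory rate index %d: %d (100 kbps units)\n",
                                i, sband->bitrates[i].bitrate);
}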
index d997d0f0c54a683fb0aee287d44fc79d4b15230d..e7c6e862580dcf64df32b4f499f7ae1c42c67609 100644 (file)
@@ -72,7 +72,6 @@ int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info,
        struct cfg80211_registered_device *rdev;
        struct vif_params vifparams;
        enum nl80211_iftype type;
-       int ret;
 
        rdev = wiphy_to_dev(wdev->wiphy);
 
@@ -98,11 +97,7 @@ int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info,
 
        memset(&vifparams, 0, sizeof(vifparams));
 
-       cfg80211_lock_rdev(rdev);
-       ret = cfg80211_change_iface(rdev, dev, type, NULL, &vifparams);
-       cfg80211_unlock_rdev(rdev);
-
-       return ret;
+       return cfg80211_change_iface(rdev, dev, type, NULL, &vifparams);
 }
 EXPORT_SYMBOL_GPL(cfg80211_wext_siwmode);
 
@@ -579,13 +574,10 @@ static int cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
 {
        int err;
 
-       /* devlist mutex needed for possible IBSS re-join */
-       mutex_lock(&rdev->devlist_mtx);
        wdev_lock(dev->ieee80211_ptr);
        err = __cfg80211_set_encryption(rdev, dev, pairwise, addr,
                                        remove, tx_key, idx, params);
        wdev_unlock(dev->ieee80211_ptr);
-       mutex_unlock(&rdev->devlist_mtx);
 
        return err;
 }
@@ -787,7 +779,7 @@ static int cfg80211_wext_siwfreq(struct net_device *dev,
        struct cfg80211_chan_def chandef = {
                .width = NL80211_CHAN_WIDTH_20_NOHT,
        };
-       int freq, err;
+       int freq;
 
        switch (wdev->iftype) {
        case NL80211_IFTYPE_STATION:
@@ -804,10 +796,7 @@ static int cfg80211_wext_siwfreq(struct net_device *dev,
                chandef.chan = ieee80211_get_channel(&rdev->wiphy, freq);
                if (!chandef.chan)
                        return -EINVAL;
-               mutex_lock(&rdev->devlist_mtx);
-               err = cfg80211_set_monitor_channel(rdev, &chandef);
-               mutex_unlock(&rdev->devlist_mtx);
-               return err;
+               return cfg80211_set_monitor_channel(rdev, &chandef);
        case NL80211_IFTYPE_MESH_POINT:
                freq = cfg80211_wext_freq(wdev->wiphy, wextfreq);
                if (freq < 0)
@@ -818,10 +807,7 @@ static int cfg80211_wext_siwfreq(struct net_device *dev,
                chandef.chan = ieee80211_get_channel(&rdev->wiphy, freq);
                if (!chandef.chan)
                        return -EINVAL;
-               mutex_lock(&rdev->devlist_mtx);
-               err = cfg80211_set_mesh_channel(rdev, wdev, &chandef);
-               mutex_unlock(&rdev->devlist_mtx);
-               return err;
+               return cfg80211_set_mesh_channel(rdev, wdev, &chandef);
        default:
                return -EOPNOTSUPP;
        }
index e79cb5c0655ad34ec0e8df85471e6c77ad856716..14c9a2583ba0926a2e14a777aaa5b69fd09f8963 100644 (file)
@@ -54,8 +54,8 @@ int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev,
        if (wdev->wext.prev_bssid_valid)
                prev_bssid = wdev->wext.prev_bssid;
 
-       err = __cfg80211_connect(rdev, wdev->netdev,
-                                &wdev->wext.connect, ck, prev_bssid);
+       err = cfg80211_connect(rdev, wdev->netdev,
+                              &wdev->wext.connect, ck, prev_bssid);
        if (err)
                kfree(ck);
 
@@ -87,12 +87,9 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev,
                        return -EINVAL;
        }
 
-       cfg80211_lock_rdev(rdev);
-       mutex_lock(&rdev->devlist_mtx);
-       mutex_lock(&rdev->sched_scan_mtx);
        wdev_lock(wdev);
 
-       if (wdev->sme_state != CFG80211_SME_IDLE) {
+       if (wdev->conn) {
                bool event = true;
 
                if (wdev->wext.connect.channel == chan) {
@@ -103,8 +100,8 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev,
                /* if SSID set, we'll try right again, avoid event */
                if (wdev->wext.connect.ssid_len)
                        event = false;
-               err = __cfg80211_disconnect(rdev, dev,
-                                           WLAN_REASON_DEAUTH_LEAVING, event);
+               err = cfg80211_disconnect(rdev, dev,
+                                         WLAN_REASON_DEAUTH_LEAVING, event);
                if (err)
                        goto out;
        }
@@ -136,9 +133,6 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev,
        err = cfg80211_mgd_wext_connect(rdev, wdev);
  out:
        wdev_unlock(wdev);
-       mutex_unlock(&rdev->sched_scan_mtx);
-       mutex_unlock(&rdev->devlist_mtx);
-       cfg80211_unlock_rdev(rdev);
        return err;
 }
 
@@ -190,14 +184,11 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev,
        if (len > 0 && ssid[len - 1] == '\0')
                len--;
 
-       cfg80211_lock_rdev(rdev);
-       mutex_lock(&rdev->devlist_mtx);
-       mutex_lock(&rdev->sched_scan_mtx);
        wdev_lock(wdev);
 
        err = 0;
 
-       if (wdev->sme_state != CFG80211_SME_IDLE) {
+       if (wdev->conn) {
                bool event = true;
 
                if (wdev->wext.connect.ssid && len &&
@@ -208,8 +199,8 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev,
                /* if SSID set now, we'll try to connect, avoid event */
                if (len)
                        event = false;
-               err = __cfg80211_disconnect(rdev, dev,
-                                           WLAN_REASON_DEAUTH_LEAVING, event);
+               err = cfg80211_disconnect(rdev, dev,
+                                         WLAN_REASON_DEAUTH_LEAVING, event);
                if (err)
                        goto out;
        }
@@ -226,9 +217,6 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev,
        err = cfg80211_mgd_wext_connect(rdev, wdev);
  out:
        wdev_unlock(wdev);
-       mutex_unlock(&rdev->sched_scan_mtx);
-       mutex_unlock(&rdev->devlist_mtx);
-       cfg80211_unlock_rdev(rdev);
        return err;
 }
 
@@ -287,12 +275,9 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev,
        if (is_zero_ether_addr(bssid) || is_broadcast_ether_addr(bssid))
                bssid = NULL;
 
-       cfg80211_lock_rdev(rdev);
-       mutex_lock(&rdev->devlist_mtx);
-       mutex_lock(&rdev->sched_scan_mtx);
        wdev_lock(wdev);
 
-       if (wdev->sme_state != CFG80211_SME_IDLE) {
+       if (wdev->conn) {
                err = 0;
                /* both automatic */
                if (!bssid && !wdev->wext.connect.bssid)
@@ -303,8 +288,8 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev,
                    ether_addr_equal(bssid, wdev->wext.connect.bssid))
                        goto out;
 
-               err = __cfg80211_disconnect(rdev, dev,
-                                           WLAN_REASON_DEAUTH_LEAVING, false);
+               err = cfg80211_disconnect(rdev, dev,
+                                         WLAN_REASON_DEAUTH_LEAVING, false);
                if (err)
                        goto out;
        }
@@ -318,9 +303,6 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev,
        err = cfg80211_mgd_wext_connect(rdev, wdev);
  out:
        wdev_unlock(wdev);
-       mutex_unlock(&rdev->sched_scan_mtx);
-       mutex_unlock(&rdev->devlist_mtx);
-       cfg80211_unlock_rdev(rdev);
        return err;
 }
 
@@ -382,9 +364,9 @@ int cfg80211_wext_siwgenie(struct net_device *dev,
        wdev->wext.ie = ie;
        wdev->wext.ie_len = ie_len;
 
-       if (wdev->sme_state != CFG80211_SME_IDLE) {
-               err = __cfg80211_disconnect(rdev, dev,
-                                           WLAN_REASON_DEAUTH_LEAVING, false);
+       if (wdev->conn) {
+               err = cfg80211_disconnect(rdev, dev,
+                                         WLAN_REASON_DEAUTH_LEAVING, false);
                if (err)
                        goto out;
        }
@@ -420,8 +402,7 @@ int cfg80211_wext_siwmlme(struct net_device *dev,
        switch (mlme->cmd) {
        case IW_MLME_DEAUTH:
        case IW_MLME_DISASSOC:
-               err = __cfg80211_disconnect(rdev, dev, mlme->reason_code,
-                                           true);
+               err = cfg80211_disconnect(rdev, dev, mlme->reason_code, true);
                break;
        default:
                err = -EOPNOTSUPP;
index 071ce1b5f2b40bb7b6d1558ee28f42fc1a99dd96..872d59e35ee23583cc60a2be24afa5ba1bf3c88d 100644 (file)
@@ -583,8 +583,6 @@ static int atmel_abdac_remove(struct platform_device *pdev)
        free_irq(dac->irq, dac);
        snd_card_free(card);
 
-       platform_set_drvdata(pdev, NULL);
-
        return 0;
 }
 
index 6b7e2b5a72dead0fe1e542fa47460e3184929a0d..ae63d22c0f883e48ddb0b37ee6aad40ac6f8943c 100644 (file)
@@ -1199,8 +1199,6 @@ static int atmel_ac97c_remove(struct platform_device *pdev)
        snd_card_set_dev(card, NULL);
        snd_card_free(card);
 
-       platform_set_drvdata(pdev, NULL);
-
        return 0;
 }
 
index 7420c59444ab41c442cb774be05f869019c5fc86..2b7f6e8bdd24343bd41ce854bb93383815386d6b 100644 (file)
@@ -922,7 +922,6 @@ static int hal2_remove(struct platform_device *pdev)
        struct snd_card *card = platform_get_drvdata(pdev);
 
        snd_card_free(card);
-       platform_set_drvdata(pdev, NULL);
        return 0;
 }
 
index 01a03efdc8b042059668e24274ac25799af63029..cfe99ae149fed9f55d70dc10f23e12e59fa4ee34 100644 (file)
@@ -963,7 +963,6 @@ static int snd_sgio2audio_remove(struct platform_device *pdev)
        struct snd_card *card = platform_get_drvdata(pdev);
 
        snd_card_free(card);
-       platform_set_drvdata(pdev, NULL);
        return 0;
 }
 
index 5849b129e50d2c73802f53369bbae3776cf4b08d..51c4ba95a32d3961375a593d41afe538ab787760 100644 (file)
@@ -250,6 +250,7 @@ config MSND_FIFOSIZE
 menuconfig SOUND_OSS
        tristate "OSS sound modules"
        depends on ISA_DMA_API && VIRT_TO_BUS
+       depends on !ISA_DMA_SUPPORT_BROKEN
        help
          OSS is the Open Sound System suite of sound card drivers.  They make
          sound programming easier since they provide a common API.  Say Y or
index 6f9b64700f6e48ec859bb7153a5bc119812062b4..55108b5fb2919c597177d9dc5f28b56cb34d28a2 100644 (file)
@@ -681,6 +681,9 @@ int snd_hda_queue_unsol_event(struct hda_bus *bus, u32 res, u32 res_ex)
        struct hda_bus_unsolicited *unsol;
        unsigned int wp;
 
+       if (!bus || !bus->workq)
+               return 0;
+
        trace_hda_unsol_event(bus, res, res_ex);
        unsol = bus->unsol;
        if (!unsol)
@@ -1580,7 +1583,7 @@ void snd_hda_codec_setup_stream(struct hda_codec *codec, hda_nid_t nid,
                    "NID=0x%x, stream=0x%x, channel=%d, format=0x%x\n",
                    nid, stream_tag, channel_id, format);
        p = get_hda_cvt_setup(codec, nid);
-       if (!p || p->active)
+       if (!p)
                return;
 
        if (codec->pcm_format_first)
@@ -1627,7 +1630,7 @@ void __snd_hda_codec_cleanup_stream(struct hda_codec *codec, hda_nid_t nid,
 
        snd_printdd("hda_codec_cleanup_stream: NID=0x%x\n", nid);
        p = get_hda_cvt_setup(codec, nid);
-       if (p && p->active) {
+       if (p) {
                /* here we just clear the active flag when do_now isn't set;
                 * actual clean-ups will be done later in
                  * purify_inactive_streams() called from snd_hda_codec_prepare()
index 7b213d589ef654ba27b7056a4dd77b5b091de49c..de18722c487346858783fa5d5e9f9fd574b70b39 100644 (file)
@@ -615,7 +615,7 @@ enum {
 /* quirks for Nvidia */
 #define AZX_DCAPS_PRESET_NVIDIA \
        (AZX_DCAPS_NVIDIA_SNOOP | AZX_DCAPS_RIRB_DELAY | AZX_DCAPS_NO_MSI |\
-        AZX_DCAPS_ALIGN_BUFSIZE)
+        AZX_DCAPS_ALIGN_BUFSIZE | AZX_DCAPS_NO_64BIT)
 
 #define AZX_DCAPS_PRESET_CTHDA \
        (AZX_DCAPS_NO_MSI | AZX_DCAPS_POSFIX_LPIB | AZX_DCAPS_4K_BDLE_BOUNDARY)
index 84b81c874a4a10413eaa807443b27330117463e5..b314d3e6d7fae5d0a576ccbe14eb45c54dfde2f9 100644 (file)
@@ -64,6 +64,7 @@ struct conexant_spec {
        /* extra EAPD pins */
        unsigned int num_eapds;
        hda_nid_t eapds[4];
+       bool dynamic_eapd;
 
 #ifdef ENABLE_CXT_STATIC_QUIRKS
        const struct snd_kcontrol_new *mixers[5];
@@ -3155,7 +3156,7 @@ static void cx_auto_parse_eapd(struct hda_codec *codec)
         * thus it might control over all pins.
         */
        if (spec->num_eapds > 2)
-               spec->gen.own_eapd_ctl = 1;
+               spec->dynamic_eapd = 1;
 }
 
 static void cx_auto_turn_eapd(struct hda_codec *codec, int num_pins,
@@ -3194,10 +3195,19 @@ static int cx_auto_build_controls(struct hda_codec *codec)
        return 0;
 }
 
+static int cx_auto_init(struct hda_codec *codec)
+{
+       struct conexant_spec *spec = codec->spec;
+       snd_hda_gen_init(codec);
+       if (!spec->dynamic_eapd)
+               cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, true);
+       return 0;
+}
+
 static const struct hda_codec_ops cx_auto_patch_ops = {
        .build_controls = cx_auto_build_controls,
        .build_pcms = snd_hda_gen_build_pcms,
-       .init = snd_hda_gen_init,
+       .init = cx_auto_init,
        .free = snd_hda_gen_free,
        .unsol_event = snd_hda_jack_unsol_event,
 #ifdef CONFIG_PM
@@ -3348,7 +3358,8 @@ static int patch_conexant_auto(struct hda_codec *codec)
 
        cx_auto_parse_beep(codec);
        cx_auto_parse_eapd(codec);
-       if (spec->gen.own_eapd_ctl)
+       spec->gen.own_eapd_ctl = 1;
+       if (spec->dynamic_eapd)
                spec->gen.vmaster_mute.hook = cx_auto_vmaster_hook;
 
        switch (codec->vendor_id) {
index 32930e66885452f0c32ad483842cb9f8b1a57701..e12f7a030c58efae2c5a93829ec6bfce1fb17ac7 100644 (file)
@@ -1832,12 +1832,10 @@ static void intel_haswell_fixup_connect_list(struct hda_codec *codec,
 #define INTEL_EN_ALL_PIN_CVTS  0x01 /* enable 2nd & 3rd pins and convertors */
 
 static void intel_haswell_enable_all_pins(struct hda_codec *codec,
-                                       const struct hda_fixup *fix, int action)
+                                         bool update_tree)
 {
        unsigned int vendor_param;
 
-       if (action != HDA_FIXUP_ACT_PRE_PROBE)
-               return;
        vendor_param = snd_hda_codec_read(codec, INTEL_VENDOR_NID, 0,
                                INTEL_GET_VENDOR_VERB, 0);
        if (vendor_param == -1 || vendor_param & INTEL_EN_ALL_PIN_CVTS)
@@ -1849,8 +1847,8 @@ static void intel_haswell_enable_all_pins(struct hda_codec *codec,
        if (vendor_param == -1)
                return;
 
-       snd_hda_codec_update_widgets(codec);
-       return;
+       if (update_tree)
+               snd_hda_codec_update_widgets(codec);
 }
 
 static void intel_haswell_fixup_enable_dp12(struct hda_codec *codec)
@@ -1868,30 +1866,20 @@ static void intel_haswell_fixup_enable_dp12(struct hda_codec *codec)
                                INTEL_SET_VENDOR_VERB, vendor_param);
 }
 
+/* Haswell needs to re-issue the vendor-specific verbs before turning to D0.
+ * Otherwise you may get severe h/w communication errors.
+ */
+static void haswell_set_power_state(struct hda_codec *codec, hda_nid_t fg,
+                               unsigned int power_state)
+{
+       if (power_state == AC_PWRST_D0) {
+               intel_haswell_enable_all_pins(codec, false);
+               intel_haswell_fixup_enable_dp12(codec);
+       }
 
-
-/* available models for fixup */
-enum {
-       INTEL_HASWELL,
-};
-
-static const struct hda_model_fixup hdmi_models[] = {
-       {.id = INTEL_HASWELL, .name = "Haswell"},
-       {}
-};
-
-static const struct snd_pci_quirk hdmi_fixup_tbl[] = {
-       SND_PCI_QUIRK(0x8086, 0x2010, "Haswell", INTEL_HASWELL),
-       {} /* terminator */
-};
-
-static const struct hda_fixup hdmi_fixups[] = {
-       [INTEL_HASWELL] = {
-               .type = HDA_FIXUP_FUNC,
-               .v.func = intel_haswell_enable_all_pins,
-       },
-};
-
+       snd_hda_codec_read(codec, fg, 0, AC_VERB_SET_POWER_STATE, power_state);
+       snd_hda_codec_set_power_to_all(codec, fg, power_state);
+}
 
 static int patch_generic_hdmi(struct hda_codec *codec)
 {
@@ -1904,11 +1892,10 @@ static int patch_generic_hdmi(struct hda_codec *codec)
        codec->spec = spec;
        hdmi_array_init(spec, 4);
 
-       snd_hda_pick_fixup(codec, hdmi_models, hdmi_fixup_tbl, hdmi_fixups);
-       snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
-
-       if (codec->vendor_id == 0x80862807)
+       if (codec->vendor_id == 0x80862807) {
+               intel_haswell_enable_all_pins(codec, true);
                intel_haswell_fixup_enable_dp12(codec);
+       }
 
        if (hdmi_parse_codec(codec) < 0) {
                codec->spec = NULL;
@@ -1916,6 +1903,9 @@ static int patch_generic_hdmi(struct hda_codec *codec)
                return -EINVAL;
        }
        codec->patch_ops = generic_hdmi_patch_ops;
+       if (codec->vendor_id == 0x80862807)
+               codec->patch_ops.set_power_state = haswell_set_power_state;
+
        generic_hdmi_init_per_pins(codec);
 
        init_channel_allocations();
index 14094f558e031999a9e791843d95fd415296f36c..1eb152cb10970d06a09b98995871581105e6fa51 100644 (file)
@@ -2882,6 +2882,7 @@ static int wm8994_aif3_hw_params(struct snd_pcm_substream *substream,
                default:
                        return 0;
                }
+               break;
        default:
                return 0;
        }
index 8b85049daab08aa0592760cb3c4f67f2ab5bf7f0..56ecfc72f2e9500ebda81214d972b65f291cd518 100644 (file)
@@ -505,7 +505,10 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
                mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE);
                mcasp_set_bits(base + DAVINCI_MCASP_RXFMCTL_REG, AFSRE);
 
-               mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG, ACLKX | AFSX);
+               mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG,
+                               ACLKX | ACLKR);
+               mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG,
+                               AFSX | AFSR);
                break;
        case SND_SOC_DAIFMT_CBM_CFS:
                /* codec is clock master and frame slave */
@@ -565,7 +568,7 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
                mcasp_set_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXPOL);
                mcasp_clr_bits(base + DAVINCI_MCASP_TXFMCTL_REG, FSXPOL);
 
-               mcasp_clr_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
+               mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
                mcasp_clr_bits(base + DAVINCI_MCASP_RXFMCTL_REG, FSRPOL);
                break;
 
index 21779a6a781a26ddf8a860588a468ac52ef12780..a80c883bb8be29eeba512d86e46af833d472c833 100644 (file)
@@ -1095,9 +1095,9 @@ int dapm_clock_event(struct snd_soc_dapm_widget *w,
 
 #ifdef CONFIG_HAVE_CLK
        if (SND_SOC_DAPM_EVENT_ON(event)) {
-               return clk_enable(w->clk);
+               return clk_prepare_enable(w->clk);
        } else {
-               clk_disable(w->clk);
+               clk_disable_unprepare(w->clk);
                return 0;
        }
 #endif
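The soc-dapm.c hunk switches the DAPM clock widget from clk_enable()/clk_disable() to clk_prepare_enable()/clk_disable_unprepare(), so the (possibly sleeping) prepare/unprepare step is no longer skipped. For reference, clk_prepare_enable() is effectively the following pairing (sketch of the generic helper, not code from this patch):

static inline int example_clk_prepare_enable(struct clk *clk)
{
        int ret;

        ret = clk_prepare(clk);         /* may sleep */
        if (ret)
                return ret;
        ret = clk_enable(clk);          /* atomic */
        if (ret)
                clk_unprepare(clk);
        return ret;
}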
index 321e066a07533bcf7a18e72c471e01cc1bef9527..9e9d348711953a44006b7c377a898e98c15b131d 100644 (file)
@@ -46,6 +46,7 @@ unsigned int skip_c0;
 unsigned int skip_c1;
 unsigned int do_nhm_cstates;
 unsigned int do_snb_cstates;
+unsigned int do_c8_c9_c10;
 unsigned int has_aperf;
 unsigned int has_epb;
 unsigned int units = 1000000000;       /* Ghz etc */
@@ -120,6 +121,9 @@ struct pkg_data {
        unsigned long long pc3;
        unsigned long long pc6;
        unsigned long long pc7;
+       unsigned long long pc8;
+       unsigned long long pc9;
+       unsigned long long pc10;
        unsigned int package_id;
        unsigned int energy_pkg;        /* MSR_PKG_ENERGY_STATUS */
        unsigned int energy_dram;       /* MSR_DRAM_ENERGY_STATUS */
@@ -282,6 +286,11 @@ void print_header(void)
                outp += sprintf(outp, "   %%pc6");
        if (do_snb_cstates)
                outp += sprintf(outp, "   %%pc7");
+       if (do_c8_c9_c10) {
+               outp += sprintf(outp, "   %%pc8");
+               outp += sprintf(outp, "   %%pc9");
+               outp += sprintf(outp, "  %%pc10");
+       }
 
        if (do_rapl & RAPL_PKG)
                outp += sprintf(outp, "  Pkg_W");
@@ -336,6 +345,9 @@ int dump_counters(struct thread_data *t, struct core_data *c,
                fprintf(stderr, "pc3: %016llX\n", p->pc3);
                fprintf(stderr, "pc6: %016llX\n", p->pc6);
                fprintf(stderr, "pc7: %016llX\n", p->pc7);
+               fprintf(stderr, "pc8: %016llX\n", p->pc8);
+               fprintf(stderr, "pc9: %016llX\n", p->pc9);
+               fprintf(stderr, "pc10: %016llX\n", p->pc10);
                fprintf(stderr, "Joules PKG: %0X\n", p->energy_pkg);
                fprintf(stderr, "Joules COR: %0X\n", p->energy_cores);
                fprintf(stderr, "Joules GFX: %0X\n", p->energy_gfx);
@@ -493,6 +505,11 @@ int format_counters(struct thread_data *t, struct core_data *c,
                outp += sprintf(outp, " %6.2f", 100.0 * p->pc6/t->tsc);
        if (do_snb_cstates)
                outp += sprintf(outp, " %6.2f", 100.0 * p->pc7/t->tsc);
+       if (do_c8_c9_c10) {
+               outp += sprintf(outp, " %6.2f", 100.0 * p->pc8/t->tsc);
+               outp += sprintf(outp, " %6.2f", 100.0 * p->pc9/t->tsc);
+               outp += sprintf(outp, " %6.2f", 100.0 * p->pc10/t->tsc);
+       }
 
        /*
         * If measurement interval exceeds minimum RAPL Joule Counter range,
@@ -569,6 +586,9 @@ delta_package(struct pkg_data *new, struct pkg_data *old)
        old->pc3 = new->pc3 - old->pc3;
        old->pc6 = new->pc6 - old->pc6;
        old->pc7 = new->pc7 - old->pc7;
+       old->pc8 = new->pc8 - old->pc8;
+       old->pc9 = new->pc9 - old->pc9;
+       old->pc10 = new->pc10 - old->pc10;
        old->pkg_temp_c = new->pkg_temp_c;
 
        DELTA_WRAP32(new->energy_pkg, old->energy_pkg);
@@ -702,6 +722,9 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data
        p->pc3 = 0;
        p->pc6 = 0;
        p->pc7 = 0;
+       p->pc8 = 0;
+       p->pc9 = 0;
+       p->pc10 = 0;
 
        p->energy_pkg = 0;
        p->energy_dram = 0;
@@ -740,6 +763,9 @@ int sum_counters(struct thread_data *t, struct core_data *c,
        average.packages.pc3 += p->pc3;
        average.packages.pc6 += p->pc6;
        average.packages.pc7 += p->pc7;
+       average.packages.pc8 += p->pc8;
+       average.packages.pc9 += p->pc9;
+       average.packages.pc10 += p->pc10;
 
        average.packages.energy_pkg += p->energy_pkg;
        average.packages.energy_dram += p->energy_dram;
@@ -781,6 +807,10 @@ void compute_average(struct thread_data *t, struct core_data *c,
        average.packages.pc3 /= topo.num_packages;
        average.packages.pc6 /= topo.num_packages;
        average.packages.pc7 /= topo.num_packages;
+
+       average.packages.pc8 /= topo.num_packages;
+       average.packages.pc9 /= topo.num_packages;
+       average.packages.pc10 /= topo.num_packages;
 }
 
 static unsigned long long rdtsc(void)
@@ -880,6 +910,14 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
                if (get_msr(cpu, MSR_PKG_C7_RESIDENCY, &p->pc7))
                        return -12;
        }
+       if (do_c8_c9_c10) {
+               if (get_msr(cpu, MSR_PKG_C8_RESIDENCY, &p->pc8))
+                       return -13;
+               if (get_msr(cpu, MSR_PKG_C9_RESIDENCY, &p->pc9))
+                       return -13;
+               if (get_msr(cpu, MSR_PKG_C10_RESIDENCY, &p->pc10))
+                       return -13;
+       }
        if (do_rapl & RAPL_PKG) {
                if (get_msr(cpu, MSR_PKG_ENERGY_STATUS, &msr))
                        return -13;
@@ -1762,6 +1800,19 @@ int is_snb(unsigned int family, unsigned int model)
        return 0;
 }
 
+int has_c8_c9_c10(unsigned int family, unsigned int model)
+{
+       if (!genuine_intel)
+               return 0;
+
+       switch (model) {
+       case 0x45:
+               return 1;
+       }
+       return 0;
+}
+
+
 double discover_bclk(unsigned int family, unsigned int model)
 {
        if (is_snb(family, model))
@@ -1918,6 +1969,7 @@ void check_cpuid()
        do_nhm_cstates = genuine_intel; /* all Intel w/ non-stop TSC have NHM counters */
        do_smi = do_nhm_cstates;
        do_snb_cstates = is_snb(family, model);
+       do_c8_c9_c10 = has_c8_c9_c10(family, model);
        bclk = discover_bclk(family, model);
 
        do_nehalem_turbo_ratio_limit = has_nehalem_turbo_ratio_limit(family, model);
@@ -2279,7 +2331,7 @@ int main(int argc, char **argv)
        cmdline(argc, argv);
 
        if (verbose)
-               fprintf(stderr, "turbostat v3.3 March 15, 2013"
+               fprintf(stderr, "turbostat v3.4 April 17, 2013"
                        " - Len Brown <lenb@kernel.org>\n");
 
        turbostat_init();
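The turbostat changes above add package C8/C9/C10 residency counters (model 0x45, a Haswell low-power part) and report each as a percentage of TSC ticks over the measurement interval, exactly like the existing pc3/pc6/pc7 columns. The math reduces to (illustrative helper, not in the patch):

static double pkg_cstate_pct(unsigned long long residency_delta,
                             unsigned long long tsc_delta)
{
        /* residency MSRs and the TSC advance at comparable rates, so the
         * ratio of the two deltas over one interval is the residency share */
        return tsc_delta ? 100.0 * residency_delta / tsc_delta : 0.0;
}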
index 45f09362ee7be02df67171efa3508fa225129499..302681c4aa4465bb21b69524d7c0d3a5341f4a4e 100644 (file)
@@ -1978,7 +1978,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
        if (vcpu->kvm->mm != current->mm)
                return -EIO;
 
-#if defined(CONFIG_S390) || defined(CONFIG_PPC)
+#if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS)
        /*
         * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
         * so vcpu_load() would break it.
@@ -3105,13 +3105,21 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
        int r;
        int cpu;
 
-       r = kvm_irqfd_init();
-       if (r)
-               goto out_irqfd;
        r = kvm_arch_init(opaque);
        if (r)
                goto out_fail;
 
+       /*
+        * kvm_arch_init makes sure there's at most one caller
+        * for architectures that support multiple implementations,
+        * like Intel and AMD on x86.
+        * kvm_arch_init must be called before kvm_irqfd_init to avoid creating
+        * conflicts in case kvm is already set up for another implementation.
+        */
+       r = kvm_irqfd_init();
+       if (r)
+               goto out_irqfd;
+
        if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
                r = -ENOMEM;
                goto out_free_0;
@@ -3186,10 +3194,10 @@ out_free_1:
 out_free_0a:
        free_cpumask_var(cpus_hardware_enabled);
 out_free_0:
-       kvm_arch_exit();
-out_fail:
        kvm_irqfd_exit();
 out_irqfd:
+       kvm_arch_exit();
+out_fail:
        return r;
 }
 EXPORT_SYMBOL_GPL(kvm_init);
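The kvm_main.c hunks reorder kvm_arch_init() ahead of kvm_irqfd_init() and mirror that swap in the error-unwind labels. A standalone sketch of the pattern, with hypothetical step_*() helpers standing in for the real init/exit pairs (none of these names are KVM APIs):

static int  step_arch_init(void)  { return 0; }
static void step_arch_exit(void)  { }
static int  step_irqfd_init(void) { return 0; }
static void step_irqfd_exit(void) { }
static int  step_rest_init(void)  { return 0; }

static int example_init(void)
{
        int r;

        r = step_arch_init();           /* must run first */
        if (r)
                goto out_fail;
        r = step_irqfd_init();          /* relies on arch init having run */
        if (r)
                goto out_irqfd;
        r = step_rest_init();
        if (r)
                goto out_rest;
        return 0;

out_rest:
        step_irqfd_exit();              /* unwind in reverse order */
out_irqfd:
        step_arch_exit();
out_fail:
        return r;
}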